1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
78 /* Whether --fdpic was given. */
83 /* Results from operand parsing worker functions. */
87 PARSE_OPERAND_SUCCESS
,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result
;
99 /* Types of processor to assemble for. */
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
105 If you have a target that requires a default CPU option then you
106 should define CPU_DEFAULT here. */
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
125 #endif /* ifndef FPU_DEFAULT */
127 #define streq(a, b) (strcmp (a, b) == 0)
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant
;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used
;
136 static arm_feature_set thumb_arch_used
;
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26
= FALSE
;
140 static int atpcs
= FALSE
;
141 static int support_interwork
= FALSE
;
142 static int uses_apcs_float
= FALSE
;
143 static int pic_code
= FALSE
;
144 static int fix_v4bx
= FALSE
;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated
= TRUE
;
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax
= FALSE
;
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set
*legacy_cpu
= NULL
;
158 static const arm_feature_set
*legacy_fpu
= NULL
;
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
162 static arm_feature_set
*mcpu_ext_opt
= NULL
;
163 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set
*march_cpu_opt
= NULL
;
167 static arm_feature_set
*march_ext_opt
= NULL
;
168 static const arm_feature_set
*march_fpu_opt
= NULL
;
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set
*mfpu_opt
= NULL
;
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
176 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
179 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
180 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
182 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
184 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
187 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
190 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
191 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2
);
192 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
193 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
194 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
195 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
196 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
197 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
198 static const arm_feature_set arm_ext_v4t_5
=
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
200 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
201 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
202 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
203 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
204 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
205 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
206 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
207 /* Only for compatibility of hint instructions. */
208 static const arm_feature_set arm_ext_v6k_v6t2
=
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V6T2
);
210 static const arm_feature_set arm_ext_v6_notm
=
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
212 static const arm_feature_set arm_ext_v6_dsp
=
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
214 static const arm_feature_set arm_ext_barrier
=
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
216 static const arm_feature_set arm_ext_msr
=
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
218 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
219 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
220 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
221 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
225 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
226 static const arm_feature_set arm_ext_m
=
227 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_V7M
,
228 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
229 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
230 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
231 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
232 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
233 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
234 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
235 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
236 static const arm_feature_set arm_ext_v8m_main
=
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
238 static const arm_feature_set arm_ext_v8_1m_main
=
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN
);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only
=
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
243 static const arm_feature_set arm_ext_v6t2_v8m
=
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics
=
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp
=
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
253 static const arm_feature_set arm_ext_ras
=
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16
=
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
258 static const arm_feature_set arm_ext_fp16_fml
=
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML
);
260 static const arm_feature_set arm_ext_v8_2
=
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A
);
262 static const arm_feature_set arm_ext_v8_3
=
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A
);
264 static const arm_feature_set arm_ext_sb
=
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
);
266 static const arm_feature_set arm_ext_predres
=
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
);
269 static const arm_feature_set arm_arch_any
= ARM_ANY
;
271 static const arm_feature_set fpu_any
= FPU_ANY
;
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
275 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
277 static const arm_feature_set arm_cext_iwmmxt2
=
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
279 static const arm_feature_set arm_cext_iwmmxt
=
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
281 static const arm_feature_set arm_cext_xscale
=
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
283 static const arm_feature_set arm_cext_maverick
=
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
285 static const arm_feature_set fpu_fpa_ext_v1
=
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
287 static const arm_feature_set fpu_fpa_ext_v2
=
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
289 static const arm_feature_set fpu_vfp_ext_v1xd
=
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
291 static const arm_feature_set fpu_vfp_ext_v1
=
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
293 static const arm_feature_set fpu_vfp_ext_v2
=
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
295 static const arm_feature_set fpu_vfp_ext_v3xd
=
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
297 static const arm_feature_set fpu_vfp_ext_v3
=
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
299 static const arm_feature_set fpu_vfp_ext_d32
=
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
301 static const arm_feature_set fpu_neon_ext_v1
=
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
305 static const arm_feature_set mve_ext
=
306 ARM_FEATURE_COPROC (FPU_MVE
);
307 static const arm_feature_set mve_fp_ext
=
308 ARM_FEATURE_COPROC (FPU_MVE_FP
);
310 static const arm_feature_set fpu_vfp_fp16
=
311 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
312 static const arm_feature_set fpu_neon_ext_fma
=
313 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
315 static const arm_feature_set fpu_vfp_ext_fma
=
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
317 static const arm_feature_set fpu_vfp_ext_armv8
=
318 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
319 static const arm_feature_set fpu_vfp_ext_armv8xd
=
320 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
321 static const arm_feature_set fpu_neon_ext_armv8
=
322 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
323 static const arm_feature_set fpu_crypto_ext_armv8
=
324 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
325 static const arm_feature_set crc_ext_armv8
=
326 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
327 static const arm_feature_set fpu_neon_ext_v8_1
=
328 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
329 static const arm_feature_set fpu_neon_ext_dotprod
=
330 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
);
332 static int mfloat_abi_opt
= -1;
333 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
335 static arm_feature_set selected_arch
= ARM_ARCH_NONE
;
336 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
338 static arm_feature_set selected_ext
= ARM_ARCH_NONE
;
339 /* Feature bits selected by the last -mcpu/-march or by the combination of the
340 last .cpu/.arch directive .arch_extension directives since that
342 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
343 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
344 static arm_feature_set selected_fpu
= FPU_NONE
;
345 /* Feature bits selected by the last .object_arch directive. */
346 static arm_feature_set selected_object_arch
= ARM_ARCH_NONE
;
347 /* Must be long enough to hold any of the names in arm_cpus. */
348 static char selected_cpu_name
[20];
350 extern FLONUM_TYPE generic_floating_point_number
;
352 /* Return if no cpu was selected on command-line. */
354 no_cpu_selected (void)
356 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
361 static int meabi_flags
= EABI_DEFAULT
;
363 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
366 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
371 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
376 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
377 symbolS
* GOT_symbol
;
380 /* 0: assemble for ARM,
381 1: assemble for Thumb,
382 2: assemble for Thumb even though target CPU does not support thumb
384 static int thumb_mode
= 0;
385 /* A value distinct from the possible values for thumb_mode that we
386 can use to record whether thumb_mode has been copied into the
387 tc_frag_data field of a frag. */
388 #define MODE_RECORDED (1 << 4)
390 /* Specifies the intrinsic IT insn behavior mode. */
391 enum implicit_it_mode
393 IMPLICIT_IT_MODE_NEVER
= 0x00,
394 IMPLICIT_IT_MODE_ARM
= 0x01,
395 IMPLICIT_IT_MODE_THUMB
= 0x02,
396 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
398 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
400 /* If unified_syntax is true, we are processing the new unified
401 ARM/Thumb syntax. Important differences from the old ARM mode:
403 - Immediate operands do not require a # prefix.
404 - Conditional affixes always appear at the end of the
405 instruction. (For backward compatibility, those instructions
406 that formerly had them in the middle, continue to accept them
408 - The IT instruction may appear, and if it does is validated
409 against subsequent conditional affixes. It does not generate
412 Important differences from the old Thumb mode:
414 - Immediate operands do not require a # prefix.
415 - Most of the V6T2 instructions are only available in unified mode.
416 - The .N and .W suffixes are recognized and honored (it is an error
417 if they cannot be honored).
418 - All instructions set the flags if and only if they have an 's' affix.
419 - Conditional affixes may be used. They are validated against
420 preceding IT instructions. Unlike ARM mode, you cannot use a
421 conditional affix except in the scope of an IT instruction. */
423 static bfd_boolean unified_syntax
= FALSE
;
425 /* An immediate operand can start with #, and ld*, st*, pld operands
426 can contain [ and ]. We need to tell APP not to elide whitespace
427 before a [, which can appear as the first operand for pld.
428 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
429 const char arm_symbol_chars
[] = "#[]{}";
444 enum neon_el_type type
;
448 #define NEON_MAX_TYPE_ELS 4
452 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
456 enum pred_instruction_type
462 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
463 if inside, should be the last one. */
464 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
465 i.e. BKPT and NOP. */
466 IT_INSN
, /* The IT insn has been parsed. */
467 VPT_INSN
, /* The VPT/VPST insn has been parsed. */
468 MVE_OUTSIDE_PRED_INSN
/* Instruction to indicate a MVE instruction without
469 a predication code. */
472 /* The maximum number of operands we need. */
473 #define ARM_IT_MAX_OPERANDS 6
474 #define ARM_IT_MAX_RELOCS 3
479 unsigned long instruction
;
483 /* "uncond_value" is set to the value in place of the conditional field in
484 unconditional versions of the instruction, or -1 if nothing is
487 struct neon_type vectype
;
488 /* This does not indicate an actual NEON instruction, only that
489 the mnemonic accepts neon-style type suffixes. */
491 /* Set to the opcode if the instruction needs relaxation.
492 Zero if the instruction is not relaxed. */
496 bfd_reloc_code_real_type type
;
499 } relocs
[ARM_IT_MAX_RELOCS
];
501 enum pred_instruction_type pred_insn_type
;
507 struct neon_type_el vectype
;
508 unsigned present
: 1; /* Operand present. */
509 unsigned isreg
: 1; /* Operand was a register. */
510 unsigned immisreg
: 1; /* .imm field is a second register. */
511 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
512 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
513 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
514 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
515 instructions. This allows us to disambiguate ARM <-> vector insns. */
516 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
517 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
518 unsigned isquad
: 1; /* Operand is SIMD quad register. */
519 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
520 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
521 unsigned writeback
: 1; /* Operand has trailing ! */
522 unsigned preind
: 1; /* Preindexed address. */
523 unsigned postind
: 1; /* Postindexed address. */
524 unsigned negative
: 1; /* Index register was negated. */
525 unsigned shifted
: 1; /* Shift applied to operation. */
526 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
527 } operands
[ARM_IT_MAX_OPERANDS
];
530 static struct arm_it inst
;
532 #define NUM_FLOAT_VALS 8
534 const char * fp_const
[] =
536 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
539 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
549 #define CP_T_X 0x00008000
550 #define CP_T_Y 0x00400000
552 #define CONDS_BIT 0x00100000
553 #define LOAD_BIT 0x00100000
555 #define DOUBLE_LOAD_FLAG 0x00000001
559 const char * template_name
;
563 #define COND_ALWAYS 0xE
567 const char * template_name
;
571 struct asm_barrier_opt
573 const char * template_name
;
575 const arm_feature_set arch
;
578 /* The bit that distinguishes CPSR and SPSR. */
579 #define SPSR_BIT (1 << 22)
581 /* The individual PSR flag bits. */
582 #define PSR_c (1 << 16)
583 #define PSR_x (1 << 17)
584 #define PSR_s (1 << 18)
585 #define PSR_f (1 << 19)
590 bfd_reloc_code_real_type reloc
;
595 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
596 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
601 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
604 /* Bits for DEFINED field in neon_typed_alias. */
605 #define NTA_HASTYPE 1
606 #define NTA_HASINDEX 2
608 struct neon_typed_alias
610 unsigned char defined
;
612 struct neon_type_el eltype
;
615 /* ARM register categories. This includes coprocessor numbers and various
616 architecture extensions' registers. Each entry should have an error message
617 in reg_expected_msgs below. */
646 /* Structure for a hash table entry for a register.
647 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
648 information which states whether a vector type or index is specified (for a
649 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
655 unsigned char builtin
;
656 struct neon_typed_alias
* neon
;
659 /* Diagnostics used when we don't get a register of the expected type. */
660 const char * const reg_expected_msgs
[] =
662 [REG_TYPE_RN
] = N_("ARM register expected"),
663 [REG_TYPE_CP
] = N_("bad or missing co-processor number"),
664 [REG_TYPE_CN
] = N_("co-processor register expected"),
665 [REG_TYPE_FN
] = N_("FPA register expected"),
666 [REG_TYPE_VFS
] = N_("VFP single precision register expected"),
667 [REG_TYPE_VFD
] = N_("VFP/Neon double precision register expected"),
668 [REG_TYPE_NQ
] = N_("Neon quad precision register expected"),
669 [REG_TYPE_VFSD
] = N_("VFP single or double precision register expected"),
670 [REG_TYPE_NDQ
] = N_("Neon double or quad precision register expected"),
671 [REG_TYPE_NSD
] = N_("Neon single or double precision register expected"),
672 [REG_TYPE_NSDQ
] = N_("VFP single, double or Neon quad precision register"
674 [REG_TYPE_VFC
] = N_("VFP system register expected"),
675 [REG_TYPE_MVF
] = N_("Maverick MVF register expected"),
676 [REG_TYPE_MVD
] = N_("Maverick MVD register expected"),
677 [REG_TYPE_MVFX
] = N_("Maverick MVFX register expected"),
678 [REG_TYPE_MVDX
] = N_("Maverick MVDX register expected"),
679 [REG_TYPE_MVAX
] = N_("Maverick MVAX register expected"),
680 [REG_TYPE_DSPSC
] = N_("Maverick DSPSC register expected"),
681 [REG_TYPE_MMXWR
] = N_("iWMMXt data register expected"),
682 [REG_TYPE_MMXWC
] = N_("iWMMXt control register expected"),
683 [REG_TYPE_MMXWCG
] = N_("iWMMXt scalar register expected"),
684 [REG_TYPE_XSCALE
] = N_("XScale accumulator register expected"),
685 [REG_TYPE_MQ
] = N_("MVE vector register expected"),
686 [REG_TYPE_RNB
] = N_("")
689 /* Some well known registers that we refer to directly elsewhere. */
695 /* ARM instructions take 4bytes in the object file, Thumb instructions
701 /* Basic string to match. */
702 const char * template_name
;
704 /* Parameters to instruction. */
705 unsigned int operands
[8];
707 /* Conditional tag - see opcode_lookup. */
708 unsigned int tag
: 4;
710 /* Basic instruction code. */
713 /* Thumb-format instruction code. */
716 /* Which architecture variant provides this instruction. */
717 const arm_feature_set
* avariant
;
718 const arm_feature_set
* tvariant
;
720 /* Function to call to encode instruction in ARM format. */
721 void (* aencode
) (void);
723 /* Function to call to encode instruction in Thumb format. */
724 void (* tencode
) (void);
726 /* Indicates whether this instruction may be vector predicated. */
727 unsigned int mayBeVecPred
: 1;
730 /* Defines for various bits that we will want to toggle. */
731 #define INST_IMMEDIATE 0x02000000
732 #define OFFSET_REG 0x02000000
733 #define HWOFFSET_IMM 0x00400000
734 #define SHIFT_BY_REG 0x00000010
735 #define PRE_INDEX 0x01000000
736 #define INDEX_UP 0x00800000
737 #define WRITE_BACK 0x00200000
738 #define LDM_TYPE_2_OR_3 0x00400000
739 #define CPSI_MMOD 0x00020000
741 #define LITERAL_MASK 0xf000f000
742 #define OPCODE_MASK 0xfe1fffff
743 #define V4_STR_BIT 0x00000020
744 #define VLDR_VMOV_SAME 0x0040f000
746 #define T2_SUBS_PC_LR 0xf3de8f00
748 #define DATA_OP_SHIFT 21
749 #define SBIT_SHIFT 20
751 #define T2_OPCODE_MASK 0xfe1fffff
752 #define T2_DATA_OP_SHIFT 21
753 #define T2_SBIT_SHIFT 20
755 #define A_COND_MASK 0xf0000000
756 #define A_PUSH_POP_OP_MASK 0x0fff0000
758 /* Opcodes for pushing/popping registers to/from the stack. */
759 #define A1_OPCODE_PUSH 0x092d0000
760 #define A2_OPCODE_PUSH 0x052d0004
761 #define A2_OPCODE_POP 0x049d0004
763 /* Codes to distinguish the arithmetic instructions. */
774 #define OPCODE_CMP 10
775 #define OPCODE_CMN 11
776 #define OPCODE_ORR 12
777 #define OPCODE_MOV 13
778 #define OPCODE_BIC 14
779 #define OPCODE_MVN 15
781 #define T2_OPCODE_AND 0
782 #define T2_OPCODE_BIC 1
783 #define T2_OPCODE_ORR 2
784 #define T2_OPCODE_ORN 3
785 #define T2_OPCODE_EOR 4
786 #define T2_OPCODE_ADD 8
787 #define T2_OPCODE_ADC 10
788 #define T2_OPCODE_SBC 11
789 #define T2_OPCODE_SUB 13
790 #define T2_OPCODE_RSB 14
792 #define T_OPCODE_MUL 0x4340
793 #define T_OPCODE_TST 0x4200
794 #define T_OPCODE_CMN 0x42c0
795 #define T_OPCODE_NEG 0x4240
796 #define T_OPCODE_MVN 0x43c0
798 #define T_OPCODE_ADD_R3 0x1800
799 #define T_OPCODE_SUB_R3 0x1a00
800 #define T_OPCODE_ADD_HI 0x4400
801 #define T_OPCODE_ADD_ST 0xb000
802 #define T_OPCODE_SUB_ST 0xb080
803 #define T_OPCODE_ADD_SP 0xa800
804 #define T_OPCODE_ADD_PC 0xa000
805 #define T_OPCODE_ADD_I8 0x3000
806 #define T_OPCODE_SUB_I8 0x3800
807 #define T_OPCODE_ADD_I3 0x1c00
808 #define T_OPCODE_SUB_I3 0x1e00
810 #define T_OPCODE_ASR_R 0x4100
811 #define T_OPCODE_LSL_R 0x4080
812 #define T_OPCODE_LSR_R 0x40c0
813 #define T_OPCODE_ROR_R 0x41c0
814 #define T_OPCODE_ASR_I 0x1000
815 #define T_OPCODE_LSL_I 0x0000
816 #define T_OPCODE_LSR_I 0x0800
818 #define T_OPCODE_MOV_I8 0x2000
819 #define T_OPCODE_CMP_I8 0x2800
820 #define T_OPCODE_CMP_LR 0x4280
821 #define T_OPCODE_MOV_HR 0x4600
822 #define T_OPCODE_CMP_HR 0x4500
824 #define T_OPCODE_LDR_PC 0x4800
825 #define T_OPCODE_LDR_SP 0x9800
826 #define T_OPCODE_STR_SP 0x9000
827 #define T_OPCODE_LDR_IW 0x6800
828 #define T_OPCODE_STR_IW 0x6000
829 #define T_OPCODE_LDR_IH 0x8800
830 #define T_OPCODE_STR_IH 0x8000
831 #define T_OPCODE_LDR_IB 0x7800
832 #define T_OPCODE_STR_IB 0x7000
833 #define T_OPCODE_LDR_RW 0x5800
834 #define T_OPCODE_STR_RW 0x5000
835 #define T_OPCODE_LDR_RH 0x5a00
836 #define T_OPCODE_STR_RH 0x5200
837 #define T_OPCODE_LDR_RB 0x5c00
838 #define T_OPCODE_STR_RB 0x5400
840 #define T_OPCODE_PUSH 0xb400
841 #define T_OPCODE_POP 0xbc00
843 #define T_OPCODE_BRANCH 0xe000
845 #define THUMB_SIZE 2 /* Size of thumb instruction. */
846 #define THUMB_PP_PC_LR 0x0100
847 #define THUMB_LOAD_BIT 0x0800
848 #define THUMB2_LOAD_BIT 0x00100000
850 #define BAD_SYNTAX _("syntax error")
851 #define BAD_ARGS _("bad arguments to instruction")
852 #define BAD_SP _("r13 not allowed here")
853 #define BAD_PC _("r15 not allowed here")
854 #define BAD_ODD _("Odd register not allowed here")
855 #define BAD_EVEN _("Even register not allowed here")
856 #define BAD_COND _("instruction cannot be conditional")
857 #define BAD_OVERLAP _("registers may not be the same")
858 #define BAD_HIREG _("lo register required")
859 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
860 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
861 #define BAD_BRANCH _("branch must be last instruction in IT block")
862 #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
863 #define BAD_NOT_IT _("instruction not allowed in IT block")
864 #define BAD_NOT_VPT _("instruction missing MVE vector predication code")
865 #define BAD_FPU _("selected FPU does not support instruction")
866 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
867 #define BAD_OUT_VPT \
868 _("vector predicated instruction should be in VPT/VPST block")
869 #define BAD_IT_COND _("incorrect condition in IT block")
870 #define BAD_VPT_COND _("incorrect condition in VPT/VPST block")
871 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
872 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
873 #define BAD_PC_ADDRESSING \
874 _("cannot use register index with PC-relative addressing")
875 #define BAD_PC_WRITEBACK \
876 _("cannot use writeback with PC-relative addressing")
877 #define BAD_RANGE _("branch out of range")
878 #define BAD_FP16 _("selected processor does not support fp16 instruction")
879 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
880 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
881 #define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
883 #define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
885 #define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
887 #define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
889 #define BAD_SIMD_TYPE _("bad type in SIMD instruction")
890 #define BAD_MVE_AUTO \
891 _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
892 " use a valid -march or -mcpu option.")
893 #define BAD_MVE_SRCDEST _("Warning: 32-bit element size and same destination "\
894 "and source operands makes instruction UNPREDICTABLE")
896 static struct hash_control
* arm_ops_hsh
;
897 static struct hash_control
* arm_cond_hsh
;
898 static struct hash_control
* arm_vcond_hsh
;
899 static struct hash_control
* arm_shift_hsh
;
900 static struct hash_control
* arm_psr_hsh
;
901 static struct hash_control
* arm_v7m_psr_hsh
;
902 static struct hash_control
* arm_reg_hsh
;
903 static struct hash_control
* arm_reloc_hsh
;
904 static struct hash_control
* arm_barrier_opt_hsh
;
906 /* Stuff needed to resolve the label ambiguity
915 symbolS
* last_label_seen
;
916 static int label_is_thumb_function_name
= FALSE
;
918 /* Literal pool structure. Held on a per-section
919 and per-sub-section basis. */
921 #define MAX_LITERAL_POOL_SIZE 1024
922 typedef struct literal_pool
924 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
925 unsigned int next_free_entry
;
931 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
933 struct literal_pool
* next
;
934 unsigned int alignment
;
937 /* Pointer to a linked list of literal pools. */
938 literal_pool
* list_of_pools
= NULL
;
940 typedef enum asmfunc_states
943 WAITING_ASMFUNC_NAME
,
947 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
950 # define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
952 static struct current_pred now_pred
;
956 now_pred_compatible (int cond
)
958 return (cond
& ~1) == (now_pred
.cc
& ~1);
962 conditional_insn (void)
964 return inst
.cond
!= COND_ALWAYS
;
967 static int in_pred_block (void);
969 static int handle_pred_state (void);
971 static void force_automatic_it_block_close (void);
973 static void it_fsm_post_encode (void);
975 #define set_pred_insn_type(type) \
978 inst.pred_insn_type = type; \
979 if (handle_pred_state () == FAIL) \
984 #define set_pred_insn_type_nonvoid(type, failret) \
987 inst.pred_insn_type = type; \
988 if (handle_pred_state () == FAIL) \
993 #define set_pred_insn_type_last() \
996 if (inst.cond == COND_ALWAYS) \
997 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
999 set_pred_insn_type (INSIDE_IT_LAST_INSN); \
/* Characters that always introduce a comment, wherever they appear on
   the line.  If the pre-processor is disabled these are of limited
   use.  */
char arm_comment_chars[] = "@";

/* Characters that introduce a comment only at the beginning of a line.
   If the line looks like '# 123 filename', .line and .file directives
   will appear in the pre-processed output.  */
/* Note that input_file.c hand-checks for '#' at the beginning of the
   first line of the input file, because the compiler outputs #NO_APP
   at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

/* Character that separates several statements on a single line.  */
char arm_line_separator_chars[] = ";";
/* Characters that may separate the mantissa from the exponent in a
   floating point number.  */
const char EXP_CHARS[] = "eE";

/* Characters that flag a number as a floating point constant,
   as in 0f12.456 or 0d1.2345e12.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
/* Prefix characters that mark the start of an immediate operand.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Skip at most a single space.  NOTE(review): this appears to rely on
   the input scrubber having already collapsed runs of whitespace to
   one space, so a single test suffices -- confirm before changing.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1039 skip_past_char (char ** str
, char c
)
1041 /* PR gas/14987: Allow for whitespace before the expected character. */
1042 skip_whitespace (*str
);
/* Shorthand: skip an (optionally whitespace-preceded) ',' via
   skip_past_char, yielding its result.  */
#define skip_past_comma(str) skip_past_char (str, ',')
1055 /* Arithmetic expressions (possibly involving symbols). */
1057 /* Return TRUE if anything in the expression is a bignum. */
1060 walk_no_bignums (symbolS
* sp
)
1062 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
1065 if (symbol_get_value_expression (sp
)->X_add_symbol
)
1067 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
1068 || (symbol_get_value_expression (sp
)->X_op_symbol
1069 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
1075 static bfd_boolean in_my_get_expression
= FALSE
;
/* Third argument to my_get_expression: how an immediate prefix
   ('#' or '$') is treated.  */
#define GE_NO_PREFIX 0		/* No prefix is expected.  */
#define GE_IMM_PREFIX 1		/* A '#'/'$' prefix is required.  */
#define GE_OPT_PREFIX 2		/* A '#'/'$' prefix is skipped if present.  */
/* This is a bit of a hack.  Use an optional prefix, and also allow big
   (64-bit) immediates, as can be used in Neon VMVN and VMOV immediate
   instructions.  */
#define GE_OPT_PREFIX_BIG 3
1086 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1090 /* In unified syntax, all prefixes are optional. */
1092 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1095 switch (prefix_mode
)
1097 case GE_NO_PREFIX
: break;
1099 if (!is_immediate_prefix (**str
))
1101 inst
.error
= _("immediate expression requires a # prefix");
1107 case GE_OPT_PREFIX_BIG
:
1108 if (is_immediate_prefix (**str
))
1115 memset (ep
, 0, sizeof (expressionS
));
1117 save_in
= input_line_pointer
;
1118 input_line_pointer
= *str
;
1119 in_my_get_expression
= TRUE
;
1121 in_my_get_expression
= FALSE
;
1123 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1125 /* We found a bad or missing expression in md_operand(). */
1126 *str
= input_line_pointer
;
1127 input_line_pointer
= save_in
;
1128 if (inst
.error
== NULL
)
1129 inst
.error
= (ep
->X_op
== O_absent
1130 ? _("missing expression") :_("bad expression"));
1134 /* Get rid of any bignums now, so that we don't generate an error for which
1135 we can't establish a line number later on. Big numbers are never valid
1136 in instructions, which is where this routine is always called. */
1137 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1138 && (ep
->X_op
== O_big
1139 || (ep
->X_add_symbol
1140 && (walk_no_bignums (ep
->X_add_symbol
)
1142 && walk_no_bignums (ep
->X_op_symbol
))))))
1144 inst
.error
= _("invalid constant");
1145 *str
= input_line_pointer
;
1146 input_line_pointer
= save_in
;
1150 *str
= input_line_pointer
;
1151 input_line_pointer
= save_in
;
1155 /* Turn a string in input_line_pointer into a floating point constant
1156 of type TYPE, and store the appropriate bytes in *LITP. The number
1157 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1158 returned, or NULL on OK.
1160 Note that fp constants aren't represented in the normal way on the ARM.
1161 In big endian mode, things are as expected. However, in little endian
1162 mode fp constants are big-endian word-wise, and little-endian byte-wise
1163 within the words. For example, (double) 1.1 in big endian mode is
1164 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1165 the byte sequence 99 99 f1 3f 9a 99 99 99.
1167 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1170 md_atof (int type
, char * litP
, int * sizeP
)
1173 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1205 return _("Unrecognized or unsupported floating point constant");
1208 t
= atof_ieee (input_line_pointer
, type
, words
);
1210 input_line_pointer
= t
;
1211 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1213 if (target_big_endian
)
1215 for (i
= 0; i
< prec
; i
++)
1217 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1218 litP
+= sizeof (LITTLENUM_TYPE
);
1223 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1224 for (i
= prec
- 1; i
>= 0; i
--)
1226 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1227 litP
+= sizeof (LITTLENUM_TYPE
);
1230 /* For a 4 byte float the order of elements in `words' is 1 0.
1231 For an 8 byte float the order is 1 0 3 2. */
1232 for (i
= 0; i
< prec
; i
+= 2)
1234 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1235 sizeof (LITTLENUM_TYPE
));
1236 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1237 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1238 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1245 /* We handle all bad expressions here, so that we can report the faulty
1246 instruction in the error message. */
1249 md_operand (expressionS
* exp
)
1251 if (in_my_get_expression
)
1252 exp
->X_op
= O_illegal
;
1255 /* Immediate values. */
1258 /* Generic immediate-value read function for use in directives.
1259 Accepts anything that 'expression' can fold to a constant.
1260 *val receives the number. */
1263 immediate_for_directive (int *val
)
1266 exp
.X_op
= O_illegal
;
1268 if (is_immediate_prefix (*input_line_pointer
))
1270 input_line_pointer
++;
1274 if (exp
.X_op
!= O_constant
)
1276 as_bad (_("expected #constant"));
1277 ignore_rest_of_line ();
1280 *val
= exp
.X_add_number
;
1285 /* Register parsing. */
1287 /* Generic register parser. CCP points to what should be the
1288 beginning of a register name. If it is indeed a valid register
1289 name, advance CCP over it and return the reg_entry structure;
1290 otherwise return NULL. Does not issue diagnostics. */
1292 static struct reg_entry
*
1293 arm_reg_parse_multi (char **ccp
)
1297 struct reg_entry
*reg
;
1299 skip_whitespace (start
);
1301 #ifdef REGISTER_PREFIX
1302 if (*start
!= REGISTER_PREFIX
)
1306 #ifdef OPTIONAL_REGISTER_PREFIX
1307 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1312 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1317 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1319 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1329 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1330 enum arm_reg_type type
)
1332 /* Alternative syntaxes are accepted for a few register classes. */
1339 /* Generic coprocessor register names are allowed for these. */
1340 if (reg
&& reg
->type
== REG_TYPE_CN
)
1345 /* For backward compatibility, a bare number is valid here. */
1347 unsigned long processor
= strtoul (start
, ccp
, 10);
1348 if (*ccp
!= start
&& processor
<= 15)
1353 case REG_TYPE_MMXWC
:
1354 /* WC includes WCG. ??? I'm not sure this is true for all
1355 instructions that take WC registers. */
1356 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1367 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1368 return value is the register number or FAIL. */
1371 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1374 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1377 /* Do not allow a scalar (reg+index) to parse as a register. */
1378 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1381 if (reg
&& reg
->type
== type
)
1384 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1391 /* Parse a Neon type specifier. *STR should point at the leading '.'
1392 character. Does no verification at this stage that the type fits the opcode
1399 Can all be legally parsed by this function.
1401 Fills in neon_type struct pointer with parsed information, and updates STR
1402 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1403 type, FAIL if not. */
1406 parse_neon_type (struct neon_type
*type
, char **str
)
1413 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1415 enum neon_el_type thistype
= NT_untyped
;
1416 unsigned thissize
= -1u;
1423 /* Just a size without an explicit type. */
1427 switch (TOLOWER (*ptr
))
1429 case 'i': thistype
= NT_integer
; break;
1430 case 'f': thistype
= NT_float
; break;
1431 case 'p': thistype
= NT_poly
; break;
1432 case 's': thistype
= NT_signed
; break;
1433 case 'u': thistype
= NT_unsigned
; break;
1435 thistype
= NT_float
;
1440 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1446 /* .f is an abbreviation for .f32. */
1447 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1452 thissize
= strtoul (ptr
, &ptr
, 10);
1454 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1457 as_bad (_("bad size %d in type specifier"), thissize
);
1465 type
->el
[type
->elems
].type
= thistype
;
1466 type
->el
[type
->elems
].size
= thissize
;
1471 /* Empty/missing type is not a successful parse. */
1472 if (type
->elems
== 0)
1480 /* Errors may be set multiple times during parsing or bit encoding
1481 (particularly in the Neon bits), but usually the earliest error which is set
1482 will be the most meaningful. Avoid overwriting it with later (cascading)
1483 errors by calling this function. */
1486 first_error (const char *err
)
1492 /* Parse a single type, e.g. ".s32", leading period included. */
1494 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1497 struct neon_type optype
;
1501 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1503 if (optype
.elems
== 1)
1504 *vectype
= optype
.el
[0];
1507 first_error (_("only one type should be specified for operand"));
1513 first_error (_("vector type expected"));
/* Special meanings for scalar indices (whose normal range is 0-7);
   both values still fit in the 4-bit lane field extracted by
   NEON_LANE.  */
#define NEON_ALL_LANES 15
#define NEON_INTERLEAVE_LANES 14
1531 /* Record a use of the given feature. */
1533 record_feature_use (const arm_feature_set
*feature
)
1536 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
1538 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
1541 /* If the given feature is available in the selected CPU, mark it as used.
1542 Returns TRUE iff feature is available. */
1544 mark_feature_used (const arm_feature_set
*feature
)
1547 /* Do not support the use of MVE only instructions when in auto-detection or
1549 if (((feature
== &mve_ext
) || (feature
== &mve_fp_ext
))
1550 && ARM_CPU_IS_ANY (cpu_variant
))
1552 first_error (BAD_MVE_AUTO
);
1555 /* Ensure the option is valid on the current architecture. */
1556 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
1559 /* Add the appropriate architecture feature for the barrier option used.
1561 record_feature_use (feature
);
1566 /* Parse either a register or a scalar, with an optional type. Return the
1567 register number, and optionally fill in the actual type of the register
1568 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1569 type/index information in *TYPEINFO. */
1572 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1573 enum arm_reg_type
*rtype
,
1574 struct neon_typed_alias
*typeinfo
)
1577 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1578 struct neon_typed_alias atype
;
1579 struct neon_type_el parsetype
;
1583 atype
.eltype
.type
= NT_invtype
;
1584 atype
.eltype
.size
= -1;
1586 /* Try alternate syntax for some types of register. Note these are mutually
1587 exclusive with the Neon syntax extensions. */
1590 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1598 /* Undo polymorphism when a set of register types may be accepted. */
1599 if ((type
== REG_TYPE_NDQ
1600 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1601 || (type
== REG_TYPE_VFSD
1602 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1603 || (type
== REG_TYPE_NSDQ
1604 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1605 || reg
->type
== REG_TYPE_NQ
))
1606 || (type
== REG_TYPE_NSD
1607 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1608 || (type
== REG_TYPE_MMXWC
1609 && (reg
->type
== REG_TYPE_MMXWCG
)))
1610 type
= (enum arm_reg_type
) reg
->type
;
1612 if (type
== REG_TYPE_MQ
)
1614 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
1617 if (!reg
|| reg
->type
!= REG_TYPE_NQ
)
1620 if (reg
->number
> 14 && !mark_feature_used (&fpu_vfp_ext_d32
))
1622 first_error (_("expected MVE register [q0..q7]"));
1627 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
1628 && (type
== REG_TYPE_NQ
))
1632 if (type
!= reg
->type
)
1638 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1640 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1642 first_error (_("can't redefine type for operand"));
1645 atype
.defined
|= NTA_HASTYPE
;
1646 atype
.eltype
= parsetype
;
1649 if (skip_past_char (&str
, '[') == SUCCESS
)
1651 if (type
!= REG_TYPE_VFD
1652 && !(type
== REG_TYPE_VFS
1653 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_2
)))
1655 first_error (_("only D registers may be indexed"));
1659 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1661 first_error (_("can't change index for operand"));
1665 atype
.defined
|= NTA_HASINDEX
;
1667 if (skip_past_char (&str
, ']') == SUCCESS
)
1668 atype
.index
= NEON_ALL_LANES
;
1673 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1675 if (exp
.X_op
!= O_constant
)
1677 first_error (_("constant expression required"));
1681 if (skip_past_char (&str
, ']') == FAIL
)
1684 atype
.index
= exp
.X_add_number
;
1699 /* Like arm_reg_parse, but also allow the following extra features:
1700 - If RTYPE is non-zero, return the (possibly restricted) type of the
1701 register (e.g. Neon double or quad reg when either has been requested).
1702 - If this is a Neon vector type with additional type information, fill
1703 in the struct pointed to by VECTYPE (if non-NULL).
1704 This function will fault on encountering a scalar. */
1707 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1708 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1710 struct neon_typed_alias atype
;
1712 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1717 /* Do not allow regname(... to parse as a register. */
1721 /* Do not allow a scalar (reg+index) to parse as a register. */
1722 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1724 first_error (_("register operand expected, but got scalar"));
1729 *vectype
= atype
.eltype
;
/* A parsed scalar is encoded as reg * 16 + index (see parse_scalar);
   these macros take that encoding apart again.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1739 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1740 have enough information to be able to do a good job bounds-checking. So, we
1741 just do easy checks here, and do further checks later. */
1744 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1748 struct neon_typed_alias atype
;
1749 enum arm_reg_type reg_type
= REG_TYPE_VFD
;
1752 reg_type
= REG_TYPE_VFS
;
1754 reg
= parse_typed_reg_or_scalar (&str
, reg_type
, NULL
, &atype
);
1756 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1759 if (atype
.index
== NEON_ALL_LANES
)
1761 first_error (_("scalar must have an index"));
1764 else if (atype
.index
>= 64 / elsize
)
1766 first_error (_("scalar index out of range"));
1771 *type
= atype
.eltype
;
1775 return reg
* 16 + atype
.index
;
1778 /* Types of registers in a list. */
1791 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1794 parse_reg_list (char ** strp
, enum reg_list_els etype
)
1800 gas_assert (etype
== REGLIST_RN
|| etype
== REGLIST_CLRM
);
1802 /* We come back here if we get ranges concatenated by '+' or '|'. */
1805 skip_whitespace (str
);
1818 const char apsr_str
[] = "apsr";
1819 int apsr_str_len
= strlen (apsr_str
);
1821 reg
= arm_reg_parse (&str
, REGLIST_RN
);
1822 if (etype
== REGLIST_CLRM
)
1824 if (reg
== REG_SP
|| reg
== REG_PC
)
1826 else if (reg
== FAIL
1827 && !strncasecmp (str
, apsr_str
, apsr_str_len
)
1828 && !ISALPHA (*(str
+ apsr_str_len
)))
1831 str
+= apsr_str_len
;
1836 first_error (_("r0-r12, lr or APSR expected"));
1840 else /* etype == REGLIST_RN. */
1844 first_error (_(reg_expected_msgs
[REGLIST_RN
]));
1855 first_error (_("bad range in register list"));
1859 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1861 if (range
& (1 << i
))
1863 (_("Warning: duplicated register (r%d) in register list"),
1871 if (range
& (1 << reg
))
1872 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1874 else if (reg
<= cur_reg
)
1875 as_tsktsk (_("Warning: register range not in ascending order"));
1880 while (skip_past_comma (&str
) != FAIL
1881 || (in_range
= 1, *str
++ == '-'));
1884 if (skip_past_char (&str
, '}') == FAIL
)
1886 first_error (_("missing `}'"));
1890 else if (etype
== REGLIST_RN
)
1894 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1897 if (exp
.X_op
== O_constant
)
1899 if (exp
.X_add_number
1900 != (exp
.X_add_number
& 0x0000ffff))
1902 inst
.error
= _("invalid register mask");
1906 if ((range
& exp
.X_add_number
) != 0)
1908 int regno
= range
& exp
.X_add_number
;
1911 regno
= (1 << regno
) - 1;
1913 (_("Warning: duplicated register (r%d) in register list"),
1917 range
|= exp
.X_add_number
;
1921 if (inst
.relocs
[0].type
!= 0)
1923 inst
.error
= _("expression too complex");
1927 memcpy (&inst
.relocs
[0].exp
, &exp
, sizeof (expressionS
));
1928 inst
.relocs
[0].type
= BFD_RELOC_ARM_MULTI
;
1929 inst
.relocs
[0].pc_rel
= 0;
1933 if (*str
== '|' || *str
== '+')
1939 while (another_range
);
1945 /* Parse a VFP register list. If the string is invalid return FAIL.
1946 Otherwise return the number of registers, and set PBASE to the first
1947 register. Parses registers of type ETYPE.
1948 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1949 - Q registers can be used to specify pairs of D registers
1950 - { } can be omitted from around a singleton register list
1951 FIXME: This is not implemented, as it would require backtracking in
1954 This could be done (the meaning isn't really ambiguous), but doesn't
1955 fit in well with the current parsing framework.
1956 - 32 D registers may be used (also true for VFPv3).
1957 FIXME: Types are ignored in these register lists, which is probably a
1961 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
,
1962 bfd_boolean
*partial_match
)
1967 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1971 unsigned long mask
= 0;
1973 bfd_boolean vpr_seen
= FALSE
;
1974 bfd_boolean expect_vpr
=
1975 (etype
== REGLIST_VFP_S_VPR
) || (etype
== REGLIST_VFP_D_VPR
);
1977 if (skip_past_char (&str
, '{') == FAIL
)
1979 inst
.error
= _("expecting {");
1986 case REGLIST_VFP_S_VPR
:
1987 regtype
= REG_TYPE_VFS
;
1992 case REGLIST_VFP_D_VPR
:
1993 regtype
= REG_TYPE_VFD
;
1996 case REGLIST_NEON_D
:
1997 regtype
= REG_TYPE_NDQ
;
2004 if (etype
!= REGLIST_VFP_S
&& etype
!= REGLIST_VFP_S_VPR
)
2006 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
2007 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
2011 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
2014 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
2021 base_reg
= max_regs
;
2022 *partial_match
= FALSE
;
2026 int setmask
= 1, addregs
= 1;
2027 const char vpr_str
[] = "vpr";
2028 int vpr_str_len
= strlen (vpr_str
);
2030 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
2034 if (new_base
== FAIL
2035 && !strncasecmp (str
, vpr_str
, vpr_str_len
)
2036 && !ISALPHA (*(str
+ vpr_str_len
))
2042 base_reg
= 0; /* Canonicalize VPR only on d0 with 0 regs. */
2046 first_error (_("VPR expected last"));
2049 else if (new_base
== FAIL
)
2051 if (regtype
== REG_TYPE_VFS
)
2052 first_error (_("VFP single precision register or VPR "
2054 else /* regtype == REG_TYPE_VFD. */
2055 first_error (_("VFP/Neon double precision register or VPR "
2060 else if (new_base
== FAIL
)
2062 first_error (_(reg_expected_msgs
[regtype
]));
2066 *partial_match
= TRUE
;
2070 if (new_base
>= max_regs
)
2072 first_error (_("register out of range in list"));
2076 /* Note: a value of 2 * n is returned for the register Q<n>. */
2077 if (regtype
== REG_TYPE_NQ
)
2083 if (new_base
< base_reg
)
2084 base_reg
= new_base
;
2086 if (mask
& (setmask
<< new_base
))
2088 first_error (_("invalid register list"));
2092 if ((mask
>> new_base
) != 0 && ! warned
&& !vpr_seen
)
2094 as_tsktsk (_("register list not in ascending order"));
2098 mask
|= setmask
<< new_base
;
2101 if (*str
== '-') /* We have the start of a range expression */
2107 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
2110 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
2114 if (high_range
>= max_regs
)
2116 first_error (_("register out of range in list"));
2120 if (regtype
== REG_TYPE_NQ
)
2121 high_range
= high_range
+ 1;
2123 if (high_range
<= new_base
)
2125 inst
.error
= _("register range not in ascending order");
2129 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
2131 if (mask
& (setmask
<< new_base
))
2133 inst
.error
= _("invalid register list");
2137 mask
|= setmask
<< new_base
;
2142 while (skip_past_comma (&str
) != FAIL
);
2146 /* Sanity check -- should have raised a parse error above. */
2147 if ((!vpr_seen
&& count
== 0) || count
> max_regs
)
2152 if (expect_vpr
&& !vpr_seen
)
2154 first_error (_("VPR expected last"));
2158 /* Final test -- the registers must be consecutive. */
2160 for (i
= 0; i
< count
; i
++)
2162 if ((mask
& (1u << i
)) == 0)
2164 inst
.error
= _("non-contiguous register range");
2174 /* True if two alias types are the same. */
2177 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
2185 if (a
->defined
!= b
->defined
)
2188 if ((a
->defined
& NTA_HASTYPE
) != 0
2189 && (a
->eltype
.type
!= b
->eltype
.type
2190 || a
->eltype
.size
!= b
->eltype
.size
))
2193 if ((a
->defined
& NTA_HASINDEX
) != 0
2194 && (a
->index
!= b
->index
))
2200 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2201 The base register is put in *PBASE.
2202 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2204 The register stride (minus one) is put in bit 4 of the return value.
2205 Bits [6:5] encode the list length (minus one).
2206 The type of the list elements is put in *ELTYPE, if non-NULL. */
/* Decoders for the value built by parse_neon_el_struct_list:
   bits [3:0] hold the lane (or a NEON_*_LANES constant), bit 4 the
   register stride minus one, bits [6:5] the list length minus one.  */
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
2213 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2214 struct neon_type_el
*eltype
)
2221 int leading_brace
= 0;
2222 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2223 const char *const incr_error
= _("register stride must be 1 or 2");
2224 const char *const type_error
= _("mismatched element/structure types in list");
2225 struct neon_typed_alias firsttype
;
2226 firsttype
.defined
= 0;
2227 firsttype
.eltype
.type
= NT_invtype
;
2228 firsttype
.eltype
.size
= -1;
2229 firsttype
.index
= -1;
2231 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2236 struct neon_typed_alias atype
;
2237 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2241 first_error (_(reg_expected_msgs
[rtype
]));
2248 if (rtype
== REG_TYPE_NQ
)
2254 else if (reg_incr
== -1)
2256 reg_incr
= getreg
- base_reg
;
2257 if (reg_incr
< 1 || reg_incr
> 2)
2259 first_error (_(incr_error
));
2263 else if (getreg
!= base_reg
+ reg_incr
* count
)
2265 first_error (_(incr_error
));
2269 if (! neon_alias_types_same (&atype
, &firsttype
))
2271 first_error (_(type_error
));
2275 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2279 struct neon_typed_alias htype
;
2280 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2282 lane
= NEON_INTERLEAVE_LANES
;
2283 else if (lane
!= NEON_INTERLEAVE_LANES
)
2285 first_error (_(type_error
));
2290 else if (reg_incr
!= 1)
2292 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2296 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2299 first_error (_(reg_expected_msgs
[rtype
]));
2302 if (! neon_alias_types_same (&htype
, &firsttype
))
2304 first_error (_(type_error
));
2307 count
+= hireg
+ dregs
- getreg
;
2311 /* If we're using Q registers, we can't use [] or [n] syntax. */
2312 if (rtype
== REG_TYPE_NQ
)
2318 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2322 else if (lane
!= atype
.index
)
2324 first_error (_(type_error
));
2328 else if (lane
== -1)
2329 lane
= NEON_INTERLEAVE_LANES
;
2330 else if (lane
!= NEON_INTERLEAVE_LANES
)
2332 first_error (_(type_error
));
2337 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2339 /* No lane set by [x]. We must be interleaving structures. */
2341 lane
= NEON_INTERLEAVE_LANES
;
2344 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2345 || (count
> 1 && reg_incr
== -1))
2347 first_error (_("error parsing element/structure list"));
2351 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2353 first_error (_("expected }"));
2361 *eltype
= firsttype
.eltype
;
2366 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2369 /* Parse an explicit relocation suffix on an expression. This is
2370 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2371 arm_reloc_hsh contains no entries, so this function can only
2372 succeed if there is no () after the word. Returns -1 on error,
2373 BFD_RELOC_UNUSED if there wasn't any suffix. */
2376 parse_reloc (char **str
)
2378 struct reloc_entry
*r
;
2382 return BFD_RELOC_UNUSED
;
2387 while (*q
&& *q
!= ')' && *q
!= ',')
2392 if ((r
= (struct reloc_entry
*)
2393 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2400 /* Directives: register aliases. */
2402 static struct reg_entry
*
2403 insert_reg_alias (char *str
, unsigned number
, int type
)
2405 struct reg_entry
*new_reg
;
2408 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2410 if (new_reg
->builtin
)
2411 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2413 /* Only warn about a redefinition if it's not defined as the
2415 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2416 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2421 name
= xstrdup (str
);
2422 new_reg
= XNEW (struct reg_entry
);
2424 new_reg
->name
= name
;
2425 new_reg
->number
= number
;
2426 new_reg
->type
= type
;
2427 new_reg
->builtin
= FALSE
;
2428 new_reg
->neon
= NULL
;
2430 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2437 insert_neon_reg_alias (char *str
, int number
, int type
,
2438 struct neon_typed_alias
*atype
)
2440 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2444 first_error (_("attempt to redefine typed alias"));
2450 reg
->neon
= XNEW (struct neon_typed_alias
);
2451 *reg
->neon
= *atype
;
2455 /* Look for the .req directive. This is of the form:
2457 new_register_name .req existing_register_name
2459 If we find one, or if it looks sufficiently like one that we want to
2460 handle any error here, return TRUE. Otherwise return FALSE. */
2463 create_register_alias (char * newname
, char *p
)
2465 struct reg_entry
*old
;
2466 char *oldname
, *nbuf
;
2469 /* The input scrubber ensures that whitespace after the mnemonic is
2470 collapsed to single spaces. */
2472 if (strncmp (oldname
, " .req ", 6) != 0)
2476 if (*oldname
== '\0')
2479 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2482 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2486 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2487 the desired alias name, and p points to its end. If not, then
2488 the desired alias name is in the global original_case_string. */
2489 #ifdef TC_CASE_SENSITIVE
2492 newname
= original_case_string
;
2493 nlen
= strlen (newname
);
2496 nbuf
= xmemdup0 (newname
, nlen
);
2498 /* Create aliases under the new name as stated; an all-lowercase
2499 version of the new name; and an all-uppercase version of the new
2501 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2503 for (p
= nbuf
; *p
; p
++)
2506 if (strncmp (nbuf
, newname
, nlen
))
2508 /* If this attempt to create an additional alias fails, do not bother
2509 trying to create the all-lower case alias. We will fail and issue
2510 a second, duplicate error message. This situation arises when the
2511 programmer does something like:
2514 The second .req creates the "Foo" alias but then fails to create
2515 the artificial FOO alias because it has already been created by the
2517 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2524 for (p
= nbuf
; *p
; p
++)
2527 if (strncmp (nbuf
, newname
, nlen
))
2528 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2535 /* Create a Neon typed/indexed register alias using directives, e.g.:
2540 These typed registers can be used instead of the types specified after the
2541 Neon mnemonic, so long as all operands given have types. Types can also be
2542 specified directly, e.g.:
2543 vadd d0.s32, d1.s32, d2.s32 */
2546 create_neon_reg_alias (char *newname
, char *p
)
2548 enum arm_reg_type basetype
;
2549 struct reg_entry
*basereg
;
2550 struct reg_entry mybasereg
;
2551 struct neon_type ntype
;
2552 struct neon_typed_alias typeinfo
;
2553 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2556 typeinfo
.defined
= 0;
2557 typeinfo
.eltype
.type
= NT_invtype
;
2558 typeinfo
.eltype
.size
= -1;
2559 typeinfo
.index
= -1;
2563 if (strncmp (p
, " .dn ", 5) == 0)
2564 basetype
= REG_TYPE_VFD
;
2565 else if (strncmp (p
, " .qn ", 5) == 0)
2566 basetype
= REG_TYPE_NQ
;
2575 basereg
= arm_reg_parse_multi (&p
);
2577 if (basereg
&& basereg
->type
!= basetype
)
2579 as_bad (_("bad type for register"));
2583 if (basereg
== NULL
)
2586 /* Try parsing as an integer. */
2587 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2588 if (exp
.X_op
!= O_constant
)
2590 as_bad (_("expression must be constant"));
2593 basereg
= &mybasereg
;
2594 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2600 typeinfo
= *basereg
->neon
;
2602 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2604 /* We got a type. */
2605 if (typeinfo
.defined
& NTA_HASTYPE
)
2607 as_bad (_("can't redefine the type of a register alias"));
2611 typeinfo
.defined
|= NTA_HASTYPE
;
2612 if (ntype
.elems
!= 1)
2614 as_bad (_("you must specify a single type only"));
2617 typeinfo
.eltype
= ntype
.el
[0];
2620 if (skip_past_char (&p
, '[') == SUCCESS
)
2623 /* We got a scalar index. */
2625 if (typeinfo
.defined
& NTA_HASINDEX
)
2627 as_bad (_("can't redefine the index of a scalar alias"));
2631 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2633 if (exp
.X_op
!= O_constant
)
2635 as_bad (_("scalar index must be constant"));
2639 typeinfo
.defined
|= NTA_HASINDEX
;
2640 typeinfo
.index
= exp
.X_add_number
;
2642 if (skip_past_char (&p
, ']') == FAIL
)
2644 as_bad (_("expecting ]"));
2649 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2650 the desired alias name, and p points to its end. If not, then
2651 the desired alias name is in the global original_case_string. */
2652 #ifdef TC_CASE_SENSITIVE
2653 namelen
= nameend
- newname
;
2655 newname
= original_case_string
;
2656 namelen
= strlen (newname
);
2659 namebuf
= xmemdup0 (newname
, namelen
);
2661 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2662 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2664 /* Insert name in all uppercase. */
2665 for (p
= namebuf
; *p
; p
++)
2668 if (strncmp (namebuf
, newname
, namelen
))
2669 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2670 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2672 /* Insert name in all lowercase. */
2673 for (p
= namebuf
; *p
; p
++)
2676 if (strncmp (namebuf
, newname
, namelen
))
2677 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2678 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2684 /* Should never be called, as .req goes between the alias and the
2685 register name, not at the beginning of the line. */
2688 s_req (int a ATTRIBUTE_UNUSED
)
2690 as_bad (_("invalid syntax for .req directive"));
2694 s_dn (int a ATTRIBUTE_UNUSED
)
2696 as_bad (_("invalid syntax for .dn directive"));
2700 s_qn (int a ATTRIBUTE_UNUSED
)
2702 as_bad (_("invalid syntax for .qn directive"));
2705 /* The .unreq directive deletes an alias which was previously defined
2706 by .req. For example:
2712 s_unreq (int a ATTRIBUTE_UNUSED
)
2717 name
= input_line_pointer
;
2719 while (*input_line_pointer
!= 0
2720 && *input_line_pointer
!= ' '
2721 && *input_line_pointer
!= '\n')
2722 ++input_line_pointer
;
2724 saved_char
= *input_line_pointer
;
2725 *input_line_pointer
= 0;
2728 as_bad (_("invalid syntax for .unreq directive"));
2731 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2735 as_bad (_("unknown register alias '%s'"), name
);
2736 else if (reg
->builtin
)
2737 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2744 hash_delete (arm_reg_hsh
, name
, FALSE
);
2745 free ((char *) reg
->name
);
2750 /* Also locate the all upper case and all lower case versions.
2751 Do not complain if we cannot find one or the other as it
2752 was probably deleted above. */
2754 nbuf
= strdup (name
);
2755 for (p
= nbuf
; *p
; p
++)
2757 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2760 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2761 free ((char *) reg
->name
);
2767 for (p
= nbuf
; *p
; p
++)
2769 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2772 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2773 free ((char *) reg
->name
);
2783 *input_line_pointer
= saved_char
;
2784 demand_empty_rest_of_line ();
2787 /* Directives: Instruction set selection. */
2790 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2791 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2792 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2793 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2795 /* Create a new mapping symbol for the transition to STATE. */
2798 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2801 const char * symname
;
2808 type
= BSF_NO_FLAGS
;
2812 type
= BSF_NO_FLAGS
;
2816 type
= BSF_NO_FLAGS
;
2822 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2823 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2828 THUMB_SET_FUNC (symbolP
, 0);
2829 ARM_SET_THUMB (symbolP
, 0);
2830 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2834 THUMB_SET_FUNC (symbolP
, 1);
2835 ARM_SET_THUMB (symbolP
, 1);
2836 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2844 /* Save the mapping symbols for future reference. Also check that
2845 we do not place two mapping symbols at the same offset within a
2846 frag. We'll handle overlap between frags in
2847 check_mapping_symbols.
2849 If .fill or other data filling directive generates zero sized data,
2850 the mapping symbol for the following code will have the same value
2851 as the one generated for the data filling directive. In this case,
2852 we replace the old symbol with the new one at the same address. */
2855 if (frag
->tc_frag_data
.first_map
!= NULL
)
2857 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2858 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2860 frag
->tc_frag_data
.first_map
= symbolP
;
2862 if (frag
->tc_frag_data
.last_map
!= NULL
)
2864 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2865 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2866 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2868 frag
->tc_frag_data
.last_map
= symbolP
;
2871 /* We must sometimes convert a region marked as code to data during
2872 code alignment, if an odd number of bytes have to be padded. The
2873 code mapping symbol is pushed to an aligned address. */
2876 insert_data_mapping_symbol (enum mstate state
,
2877 valueT value
, fragS
*frag
, offsetT bytes
)
2879 /* If there was already a mapping symbol, remove it. */
2880 if (frag
->tc_frag_data
.last_map
!= NULL
2881 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2883 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2887 know (frag
->tc_frag_data
.first_map
== symp
);
2888 frag
->tc_frag_data
.first_map
= NULL
;
2890 frag
->tc_frag_data
.last_map
= NULL
;
2891 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2894 make_mapping_symbol (MAP_DATA
, value
, frag
);
2895 make_mapping_symbol (state
, value
+ bytes
, frag
);
2898 static void mapping_state_2 (enum mstate state
, int max_chars
);
2900 /* Set the mapping state to STATE. Only call this when about to
2901 emit some STATE bytes to the file. */
2903 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2905 mapping_state (enum mstate state
)
2907 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2909 if (mapstate
== state
)
2910 /* The mapping symbol has already been emitted.
2911 There is nothing else to do. */
2914 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2916 All ARM instructions require 4-byte alignment.
2917 (Almost) all Thumb instructions require 2-byte alignment.
2919 When emitting instructions into any section, mark the section
2922 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2923 but themselves require 2-byte alignment; this applies to some
2924 PC- relative forms. However, these cases will involve implicit
2925 literal pool generation or an explicit .align >=2, both of
2926 which will cause the section to me marked with sufficient
2927 alignment. Thus, we don't handle those cases here. */
2928 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2930 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2931 /* This case will be evaluated later. */
2934 mapping_state_2 (state
, 0);
2937 /* Same as mapping_state, but MAX_CHARS bytes have already been
2938 allocated. Put the mapping symbol that far back. */
2941 mapping_state_2 (enum mstate state
, int max_chars
)
2943 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2945 if (!SEG_NORMAL (now_seg
))
2948 if (mapstate
== state
)
2949 /* The mapping symbol has already been emitted.
2950 There is nothing else to do. */
2953 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2954 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2956 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2957 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2960 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2963 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2964 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2968 #define mapping_state(x) ((void)0)
2969 #define mapping_state_2(x, y) ((void)0)
2972 /* Find the real, Thumb encoded start of a Thumb function. */
2976 find_real_start (symbolS
* symbolP
)
2979 const char * name
= S_GET_NAME (symbolP
);
2980 symbolS
* new_target
;
2982 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2983 #define STUB_NAME ".real_start_of"
2988 /* The compiler may generate BL instructions to local labels because
2989 it needs to perform a branch to a far away location. These labels
2990 do not have a corresponding ".real_start_of" label. We check
2991 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2992 the ".real_start_of" convention for nonlocal branches. */
2993 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2996 real_start
= concat (STUB_NAME
, name
, NULL
);
2997 new_target
= symbol_find (real_start
);
3000 if (new_target
== NULL
)
3002 as_warn (_("Failed to find real start of function: %s\n"), name
);
3003 new_target
= symbolP
;
3011 opcode_select (int width
)
3018 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
3019 as_bad (_("selected processor does not support THUMB opcodes"));
3022 /* No need to force the alignment, since we will have been
3023 coming from ARM mode, which is word-aligned. */
3024 record_alignment (now_seg
, 1);
3031 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
3032 as_bad (_("selected processor does not support ARM opcodes"));
3037 frag_align (2, 0, 0);
3039 record_alignment (now_seg
, 1);
3044 as_bad (_("invalid instruction size selected (%d)"), width
);
3049 s_arm (int ignore ATTRIBUTE_UNUSED
)
3052 demand_empty_rest_of_line ();
3056 s_thumb (int ignore ATTRIBUTE_UNUSED
)
3059 demand_empty_rest_of_line ();
3063 s_code (int unused ATTRIBUTE_UNUSED
)
3067 temp
= get_absolute_expression ();
3072 opcode_select (temp
);
3076 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
3081 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
3083 /* If we are not already in thumb mode go into it, EVEN if
3084 the target processor does not support thumb instructions.
3085 This is used by gcc/config/arm/lib1funcs.asm for example
3086 to compile interworking support functions even if the
3087 target processor should not support interworking. */
3091 record_alignment (now_seg
, 1);
3094 demand_empty_rest_of_line ();
3098 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
3102 /* The following label is the name/address of the start of a Thumb function.
3103 We need to know this for the interworking support. */
3104 label_is_thumb_function_name
= TRUE
;
3107 /* Perform a .set directive, but also mark the alias as
3108 being a thumb function. */
3111 s_thumb_set (int equiv
)
3113 /* XXX the following is a duplicate of the code for s_set() in read.c
3114 We cannot just call that code as we need to get at the symbol that
3121 /* Especial apologies for the random logic:
3122 This just grew, and could be parsed much more simply!
3124 delim
= get_symbol_name (& name
);
3125 end_name
= input_line_pointer
;
3126 (void) restore_line_pointer (delim
);
3128 if (*input_line_pointer
!= ',')
3131 as_bad (_("expected comma after name \"%s\""), name
);
3133 ignore_rest_of_line ();
3137 input_line_pointer
++;
3140 if (name
[0] == '.' && name
[1] == '\0')
3142 /* XXX - this should not happen to .thumb_set. */
3146 if ((symbolP
= symbol_find (name
)) == NULL
3147 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
3150 /* When doing symbol listings, play games with dummy fragments living
3151 outside the normal fragment chain to record the file and line info
3153 if (listing
& LISTING_SYMBOLS
)
3155 extern struct list_info_struct
* listing_tail
;
3156 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
3158 memset (dummy_frag
, 0, sizeof (fragS
));
3159 dummy_frag
->fr_type
= rs_fill
;
3160 dummy_frag
->line
= listing_tail
;
3161 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
3162 dummy_frag
->fr_symbol
= symbolP
;
3166 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
3169 /* "set" symbols are local unless otherwise specified. */
3170 SF_SET_LOCAL (symbolP
);
3171 #endif /* OBJ_COFF */
3172 } /* Make a new symbol. */
3174 symbol_table_insert (symbolP
);
3179 && S_IS_DEFINED (symbolP
)
3180 && S_GET_SEGMENT (symbolP
) != reg_section
)
3181 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
3183 pseudo_set (symbolP
);
3185 demand_empty_rest_of_line ();
3187 /* XXX Now we come to the Thumb specific bit of code. */
3189 THUMB_SET_FUNC (symbolP
, 1);
3190 ARM_SET_THUMB (symbolP
, 1);
3191 #if defined OBJ_ELF || defined OBJ_COFF
3192 ARM_SET_INTERWORK (symbolP
, support_interwork
);
3196 /* Directives: Mode selection. */
3198 /* .syntax [unified|divided] - choose the new unified syntax
3199 (same for Arm and Thumb encoding, modulo slight differences in what
3200 can be represented) or the old divergent syntax for each mode. */
3202 s_syntax (int unused ATTRIBUTE_UNUSED
)
3206 delim
= get_symbol_name (& name
);
3208 if (!strcasecmp (name
, "unified"))
3209 unified_syntax
= TRUE
;
3210 else if (!strcasecmp (name
, "divided"))
3211 unified_syntax
= FALSE
;
3214 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3217 (void) restore_line_pointer (delim
);
3218 demand_empty_rest_of_line ();
3221 /* Directives: sectioning and alignment. */
3224 s_bss (int ignore ATTRIBUTE_UNUSED
)
3226 /* We don't support putting frags in the BSS segment, we fake it by
3227 marking in_bss, then looking at s_skip for clues. */
3228 subseg_set (bss_section
, 0);
3229 demand_empty_rest_of_line ();
3231 #ifdef md_elf_section_change_hook
3232 md_elf_section_change_hook ();
3237 s_even (int ignore ATTRIBUTE_UNUSED
)
3239 /* Never make frag if expect extra pass. */
3241 frag_align (1, 0, 0);
3243 record_alignment (now_seg
, 1);
3245 demand_empty_rest_of_line ();
3248 /* Directives: CodeComposer Studio. */
3250 /* .ref (for CodeComposer Studio syntax only). */
3252 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3254 if (codecomposer_syntax
)
3255 ignore_rest_of_line ();
3257 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3260 /* If name is not NULL, then it is used for marking the beginning of a
3261 function, whereas if it is NULL then it means the function end. */
3263 asmfunc_debug (const char * name
)
3265 static const char * last_name
= NULL
;
3269 gas_assert (last_name
== NULL
);
3272 if (debug_type
== DEBUG_STABS
)
3273 stabs_generate_asm_func (name
, name
);
3277 gas_assert (last_name
!= NULL
);
3279 if (debug_type
== DEBUG_STABS
)
3280 stabs_generate_asm_endfunc (last_name
, last_name
);
3287 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3289 if (codecomposer_syntax
)
3291 switch (asmfunc_state
)
3293 case OUTSIDE_ASMFUNC
:
3294 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3297 case WAITING_ASMFUNC_NAME
:
3298 as_bad (_(".asmfunc repeated."));
3301 case WAITING_ENDASMFUNC
:
3302 as_bad (_(".asmfunc without function."));
3305 demand_empty_rest_of_line ();
3308 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3312 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3314 if (codecomposer_syntax
)
3316 switch (asmfunc_state
)
3318 case OUTSIDE_ASMFUNC
:
3319 as_bad (_(".endasmfunc without a .asmfunc."));
3322 case WAITING_ASMFUNC_NAME
:
3323 as_bad (_(".endasmfunc without function."));
3326 case WAITING_ENDASMFUNC
:
3327 asmfunc_state
= OUTSIDE_ASMFUNC
;
3328 asmfunc_debug (NULL
);
3331 demand_empty_rest_of_line ();
3334 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3338 s_ccs_def (int name
)
3340 if (codecomposer_syntax
)
3343 as_bad (_(".def pseudo-op only available with -mccs flag."));
3346 /* Directives: Literal pools. */
3348 static literal_pool
*
3349 find_literal_pool (void)
3351 literal_pool
* pool
;
3353 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3355 if (pool
->section
== now_seg
3356 && pool
->sub_section
== now_subseg
)
3363 static literal_pool
*
3364 find_or_make_literal_pool (void)
3366 /* Next literal pool ID number. */
3367 static unsigned int latest_pool_num
= 1;
3368 literal_pool
* pool
;
3370 pool
= find_literal_pool ();
3374 /* Create a new pool. */
3375 pool
= XNEW (literal_pool
);
3379 pool
->next_free_entry
= 0;
3380 pool
->section
= now_seg
;
3381 pool
->sub_section
= now_subseg
;
3382 pool
->next
= list_of_pools
;
3383 pool
->symbol
= NULL
;
3384 pool
->alignment
= 2;
3386 /* Add it to the list. */
3387 list_of_pools
= pool
;
3390 /* New pools, and emptied pools, will have a NULL symbol. */
3391 if (pool
->symbol
== NULL
)
3393 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3394 (valueT
) 0, &zero_address_frag
);
3395 pool
->id
= latest_pool_num
++;
3402 /* Add the literal in the global 'inst'
3403 structure to the relevant literal pool. */
3406 add_to_lit_pool (unsigned int nbytes
)
3408 #define PADDING_SLOT 0x1
3409 #define LIT_ENTRY_SIZE_MASK 0xFF
3410 literal_pool
* pool
;
3411 unsigned int entry
, pool_size
= 0;
3412 bfd_boolean padding_slot_p
= FALSE
;
3418 imm1
= inst
.operands
[1].imm
;
3419 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3420 : inst
.relocs
[0].exp
.X_unsigned
? 0
3421 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3422 if (target_big_endian
)
3425 imm2
= inst
.operands
[1].imm
;
3429 pool
= find_or_make_literal_pool ();
3431 /* Check if this literal value is already in the pool. */
3432 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3436 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3437 && (inst
.relocs
[0].exp
.X_op
== O_constant
)
3438 && (pool
->literals
[entry
].X_add_number
3439 == inst
.relocs
[0].exp
.X_add_number
)
3440 && (pool
->literals
[entry
].X_md
== nbytes
)
3441 && (pool
->literals
[entry
].X_unsigned
3442 == inst
.relocs
[0].exp
.X_unsigned
))
3445 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3446 && (inst
.relocs
[0].exp
.X_op
== O_symbol
)
3447 && (pool
->literals
[entry
].X_add_number
3448 == inst
.relocs
[0].exp
.X_add_number
)
3449 && (pool
->literals
[entry
].X_add_symbol
3450 == inst
.relocs
[0].exp
.X_add_symbol
)
3451 && (pool
->literals
[entry
].X_op_symbol
3452 == inst
.relocs
[0].exp
.X_op_symbol
)
3453 && (pool
->literals
[entry
].X_md
== nbytes
))
3456 else if ((nbytes
== 8)
3457 && !(pool_size
& 0x7)
3458 && ((entry
+ 1) != pool
->next_free_entry
)
3459 && (pool
->literals
[entry
].X_op
== O_constant
)
3460 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3461 && (pool
->literals
[entry
].X_unsigned
3462 == inst
.relocs
[0].exp
.X_unsigned
)
3463 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3464 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3465 && (pool
->literals
[entry
+ 1].X_unsigned
3466 == inst
.relocs
[0].exp
.X_unsigned
))
3469 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3470 if (padding_slot_p
&& (nbytes
== 4))
3476 /* Do we need to create a new entry? */
3477 if (entry
== pool
->next_free_entry
)
3479 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3481 inst
.error
= _("literal pool overflow");
3487 /* For 8-byte entries, we align to an 8-byte boundary,
3488 and split it into two 4-byte entries, because on 32-bit
3489 host, 8-byte constants are treated as big num, thus
3490 saved in "generic_bignum" which will be overwritten
3491 by later assignments.
3493 We also need to make sure there is enough space for
3496 We also check to make sure the literal operand is a
3498 if (!(inst
.relocs
[0].exp
.X_op
== O_constant
3499 || inst
.relocs
[0].exp
.X_op
== O_big
))
3501 inst
.error
= _("invalid type for literal pool");
3504 else if (pool_size
& 0x7)
3506 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3508 inst
.error
= _("literal pool overflow");
3512 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3513 pool
->literals
[entry
].X_op
= O_constant
;
3514 pool
->literals
[entry
].X_add_number
= 0;
3515 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3516 pool
->next_free_entry
+= 1;
3519 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3521 inst
.error
= _("literal pool overflow");
3525 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3526 pool
->literals
[entry
].X_op
= O_constant
;
3527 pool
->literals
[entry
].X_add_number
= imm1
;
3528 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3529 pool
->literals
[entry
++].X_md
= 4;
3530 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3531 pool
->literals
[entry
].X_op
= O_constant
;
3532 pool
->literals
[entry
].X_add_number
= imm2
;
3533 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3534 pool
->literals
[entry
].X_md
= 4;
3535 pool
->alignment
= 3;
3536 pool
->next_free_entry
+= 1;
3540 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3541 pool
->literals
[entry
].X_md
= 4;
3545 /* PR ld/12974: Record the location of the first source line to reference
3546 this entry in the literal pool. If it turns out during linking that the
3547 symbol does not exist we will be able to give an accurate line number for
3548 the (first use of the) missing reference. */
3549 if (debug_type
== DEBUG_DWARF2
)
3550 dwarf2_where (pool
->locs
+ entry
);
3552 pool
->next_free_entry
+= 1;
3554 else if (padding_slot_p
)
3556 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3557 pool
->literals
[entry
].X_md
= nbytes
;
3560 inst
.relocs
[0].exp
.X_op
= O_symbol
;
3561 inst
.relocs
[0].exp
.X_add_number
= pool_size
;
3562 inst
.relocs
[0].exp
.X_add_symbol
= pool
->symbol
;
3568 tc_start_label_without_colon (void)
3570 bfd_boolean ret
= TRUE
;
3572 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3574 const char *label
= input_line_pointer
;
3576 while (!is_end_of_line
[(int) label
[-1]])
3581 as_bad (_("Invalid label '%s'"), label
);
3585 asmfunc_debug (label
);
3587 asmfunc_state
= WAITING_ENDASMFUNC
;
3593 /* Can't use symbol_new here, so have to create a symbol and then at
3594 a later date assign it a value. That's what these functions do. */
3597 symbol_locate (symbolS
* symbolP
,
3598 const char * name
, /* It is copied, the caller can modify. */
3599 segT segment
, /* Segment identifier (SEG_<something>). */
3600 valueT valu
, /* Symbol value. */
3601 fragS
* frag
) /* Associated fragment. */
3604 char * preserved_copy_of_name
;
3606 name_length
= strlen (name
) + 1; /* +1 for \0. */
3607 obstack_grow (¬es
, name
, name_length
);
3608 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3610 #ifdef tc_canonicalize_symbol_name
3611 preserved_copy_of_name
=
3612 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3615 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3617 S_SET_SEGMENT (symbolP
, segment
);
3618 S_SET_VALUE (symbolP
, valu
);
3619 symbol_clear_list_pointers (symbolP
);
3621 symbol_set_frag (symbolP
, frag
);
3623 /* Link to end of symbol chain. */
3625 extern int symbol_table_frozen
;
3627 if (symbol_table_frozen
)
3631 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3633 obj_symbol_new_hook (symbolP
);
3635 #ifdef tc_symbol_new_hook
3636 tc_symbol_new_hook (symbolP
);
3640 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3641 #endif /* DEBUG_SYMS */
3645 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3648 literal_pool
* pool
;
3651 pool
= find_literal_pool ();
3653 || pool
->symbol
== NULL
3654 || pool
->next_free_entry
== 0)
3657 /* Align pool as you have word accesses.
3658 Only make a frag if we have to. */
3660 frag_align (pool
->alignment
, 0, 0);
3662 record_alignment (now_seg
, 2);
3665 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3666 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3668 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3670 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3671 (valueT
) frag_now_fix (), frag_now
);
3672 symbol_table_insert (pool
->symbol
);
3674 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3676 #if defined OBJ_COFF || defined OBJ_ELF
3677 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3680 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3683 if (debug_type
== DEBUG_DWARF2
)
3684 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3686 /* First output the expression in the instruction to the pool. */
3687 emit_expr (&(pool
->literals
[entry
]),
3688 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3691 /* Mark the pool as empty. */
3692 pool
->next_free_entry
= 0;
3693 pool
->symbol
= NULL
;
3697 /* Forward declarations for functions below, in the MD interface
3699 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3700 static valueT
create_unwind_entry (int);
3701 static void start_unwind_section (const segT
, int);
3702 static void add_unwind_opcode (valueT
, int);
3703 static void flush_pending_unwind (void);
3705 /* Directives: Data. */
3708 s_arm_elf_cons (int nbytes
)
3712 #ifdef md_flush_pending_output
3713 md_flush_pending_output ();
3716 if (is_it_end_of_statement ())
3718 demand_empty_rest_of_line ();
3722 #ifdef md_cons_align
3723 md_cons_align (nbytes
);
3726 mapping_state (MAP_DATA
);
3730 char *base
= input_line_pointer
;
3734 if (exp
.X_op
!= O_symbol
)
3735 emit_expr (&exp
, (unsigned int) nbytes
);
3738 char *before_reloc
= input_line_pointer
;
3739 reloc
= parse_reloc (&input_line_pointer
);
3742 as_bad (_("unrecognized relocation suffix"));
3743 ignore_rest_of_line ();
3746 else if (reloc
== BFD_RELOC_UNUSED
)
3747 emit_expr (&exp
, (unsigned int) nbytes
);
3750 reloc_howto_type
*howto
= (reloc_howto_type
*)
3751 bfd_reloc_type_lookup (stdoutput
,
3752 (bfd_reloc_code_real_type
) reloc
);
3753 int size
= bfd_get_reloc_size (howto
);
3755 if (reloc
== BFD_RELOC_ARM_PLT32
)
3757 as_bad (_("(plt) is only valid on branch targets"));
3758 reloc
= BFD_RELOC_UNUSED
;
3763 as_bad (ngettext ("%s relocations do not fit in %d byte",
3764 "%s relocations do not fit in %d bytes",
3766 howto
->name
, nbytes
);
3769 /* We've parsed an expression stopping at O_symbol.
3770 But there may be more expression left now that we
3771 have parsed the relocation marker. Parse it again.
3772 XXX Surely there is a cleaner way to do this. */
3773 char *p
= input_line_pointer
;
3775 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3777 memcpy (save_buf
, base
, input_line_pointer
- base
);
3778 memmove (base
+ (input_line_pointer
- before_reloc
),
3779 base
, before_reloc
- base
);
3781 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3783 memcpy (base
, save_buf
, p
- base
);
3785 offset
= nbytes
- size
;
3786 p
= frag_more (nbytes
);
3787 memset (p
, 0, nbytes
);
3788 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3789 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3795 while (*input_line_pointer
++ == ',');
3797 /* Put terminator back into stream. */
3798 input_line_pointer
--;
3799 demand_empty_rest_of_line ();
3802 /* Emit an expression containing a 32-bit thumb instruction.
3803 Implementation based on put_thumb32_insn. */
3806 emit_thumb32_expr (expressionS
* exp
)
3808 expressionS exp_high
= *exp
;
3810 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3811 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3812 exp
->X_add_number
&= 0xffff;
3813 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
/* Guess the instruction size based on the opcode.
   Returns 2 for a 16-bit encoding, 4 for a 32-bit encoding, and 0 when
   the width cannot be determined from the value alone.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3830 emit_insn (expressionS
*exp
, int nbytes
)
3834 if (exp
->X_op
== O_constant
)
3839 size
= thumb_insn_size (exp
->X_add_number
);
3843 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3845 as_bad (_(".inst.n operand too big. "\
3846 "Use .inst.w instead"));
3851 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
)
3852 set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN
, 0);
3854 set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3856 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3857 emit_thumb32_expr (exp
);
3859 emit_expr (exp
, (unsigned int) size
);
3861 it_fsm_post_encode ();
3865 as_bad (_("cannot determine Thumb instruction size. " \
3866 "Use .inst.n/.inst.w instead"));
3869 as_bad (_("constant expression required"));
3874 /* Like s_arm_elf_cons but do not use md_cons_align and
3875 set the mapping state to MAP_ARM/MAP_THUMB. */
3878 s_arm_elf_inst (int nbytes
)
3880 if (is_it_end_of_statement ())
3882 demand_empty_rest_of_line ();
3886 /* Calling mapping_state () here will not change ARM/THUMB,
3887 but will ensure not to be in DATA state. */
3890 mapping_state (MAP_THUMB
);
3895 as_bad (_("width suffixes are invalid in ARM mode"));
3896 ignore_rest_of_line ();
3902 mapping_state (MAP_ARM
);
3911 if (! emit_insn (& exp
, nbytes
))
3913 ignore_rest_of_line ();
3917 while (*input_line_pointer
++ == ',');
3919 /* Put terminator back into stream. */
3920 input_line_pointer
--;
3921 demand_empty_rest_of_line ();
3924 /* Parse a .rel31 directive. */
3927 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3934 if (*input_line_pointer
== '1')
3935 highbit
= 0x80000000;
3936 else if (*input_line_pointer
!= '0')
3937 as_bad (_("expected 0 or 1"));
3939 input_line_pointer
++;
3940 if (*input_line_pointer
!= ',')
3941 as_bad (_("missing comma"));
3942 input_line_pointer
++;
3944 #ifdef md_flush_pending_output
3945 md_flush_pending_output ();
3948 #ifdef md_cons_align
3952 mapping_state (MAP_DATA
);
3957 md_number_to_chars (p
, highbit
, 4);
3958 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3959 BFD_RELOC_ARM_PREL31
);
3961 demand_empty_rest_of_line ();
3964 /* Directives: AEABI stack-unwind tables. */
3966 /* Parse an unwind_fnstart directive. Simply records the current location. */
3969 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3971 demand_empty_rest_of_line ();
3972 if (unwind
.proc_start
)
3974 as_bad (_("duplicate .fnstart directive"));
3978 /* Mark the start of the function. */
3979 unwind
.proc_start
= expr_build_dot ();
3981 /* Reset the rest of the unwind info. */
3982 unwind
.opcode_count
= 0;
3983 unwind
.table_entry
= NULL
;
3984 unwind
.personality_routine
= NULL
;
3985 unwind
.personality_index
= -1;
3986 unwind
.frame_size
= 0;
3987 unwind
.fp_offset
= 0;
3988 unwind
.fp_reg
= REG_SP
;
3990 unwind
.sp_restored
= 0;
3994 /* Parse a handlerdata directive. Creates the exception handling table entry
3995 for the function. */
3998 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
4000 demand_empty_rest_of_line ();
4001 if (!unwind
.proc_start
)
4002 as_bad (MISSING_FNSTART
);
4004 if (unwind
.table_entry
)
4005 as_bad (_("duplicate .handlerdata directive"));
4007 create_unwind_entry (1);
4010 /* Parse an unwind_fnend directive. Generates the index table entry. */
4013 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
4018 unsigned int marked_pr_dependency
;
4020 demand_empty_rest_of_line ();
4022 if (!unwind
.proc_start
)
4024 as_bad (_(".fnend directive without .fnstart"));
4028 /* Add eh table entry. */
4029 if (unwind
.table_entry
== NULL
)
4030 val
= create_unwind_entry (0);
4034 /* Add index table entry. This is two words. */
4035 start_unwind_section (unwind
.saved_seg
, 1);
4036 frag_align (2, 0, 0);
4037 record_alignment (now_seg
, 2);
4039 ptr
= frag_more (8);
4041 where
= frag_now_fix () - 8;
4043 /* Self relative offset of the function start. */
4044 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
4045 BFD_RELOC_ARM_PREL31
);
4047 /* Indicate dependency on EHABI-defined personality routines to the
4048 linker, if it hasn't been done already. */
4049 marked_pr_dependency
4050 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
4051 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
4052 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
4054 static const char *const name
[] =
4056 "__aeabi_unwind_cpp_pr0",
4057 "__aeabi_unwind_cpp_pr1",
4058 "__aeabi_unwind_cpp_pr2"
4060 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
4061 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
4062 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
4063 |= 1 << unwind
.personality_index
;
4067 /* Inline exception table entry. */
4068 md_number_to_chars (ptr
+ 4, val
, 4);
4070 /* Self relative offset of the table entry. */
4071 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
4072 BFD_RELOC_ARM_PREL31
);
4074 /* Restore the original section. */
4075 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
4077 unwind
.proc_start
= NULL
;
4081 /* Parse an unwind_cantunwind directive. */
4084 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
4086 demand_empty_rest_of_line ();
4087 if (!unwind
.proc_start
)
4088 as_bad (MISSING_FNSTART
);
4090 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4091 as_bad (_("personality routine specified for cantunwind frame"));
4093 unwind
.personality_index
= -2;
4097 /* Parse a personalityindex directive. */
4100 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
4104 if (!unwind
.proc_start
)
4105 as_bad (MISSING_FNSTART
);
4107 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4108 as_bad (_("duplicate .personalityindex directive"));
4112 if (exp
.X_op
!= O_constant
4113 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
4115 as_bad (_("bad personality routine number"));
4116 ignore_rest_of_line ();
4120 unwind
.personality_index
= exp
.X_add_number
;
4122 demand_empty_rest_of_line ();
4126 /* Parse a personality directive. */
4129 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
4133 if (!unwind
.proc_start
)
4134 as_bad (MISSING_FNSTART
);
4136 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4137 as_bad (_("duplicate .personality directive"));
4139 c
= get_symbol_name (& name
);
4140 p
= input_line_pointer
;
4142 ++ input_line_pointer
;
4143 unwind
.personality_routine
= symbol_find_or_make (name
);
4145 demand_empty_rest_of_line ();
4149 /* Parse a directive saving core registers. */
4152 s_arm_unwind_save_core (void)
4158 range
= parse_reg_list (&input_line_pointer
, REGLIST_RN
);
4161 as_bad (_("expected register list"));
4162 ignore_rest_of_line ();
4166 demand_empty_rest_of_line ();
4168 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
4169 into .unwind_save {..., sp...}. We aren't bothered about the value of
4170 ip because it is clobbered by calls. */
4171 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
4172 && (range
& 0x3000) == 0x1000)
4174 unwind
.opcode_count
--;
4175 unwind
.sp_restored
= 0;
4176 range
= (range
| 0x2000) & ~0x1000;
4177 unwind
.pending_offset
= 0;
4183 /* See if we can use the short opcodes. These pop a block of up to 8
4184 registers starting with r4, plus maybe r14. */
4185 for (n
= 0; n
< 8; n
++)
4187 /* Break at the first non-saved register. */
4188 if ((range
& (1 << (n
+ 4))) == 0)
4191 /* See if there are any other bits set. */
4192 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
4194 /* Use the long form. */
4195 op
= 0x8000 | ((range
>> 4) & 0xfff);
4196 add_unwind_opcode (op
, 2);
4200 /* Use the short form. */
4202 op
= 0xa8; /* Pop r14. */
4204 op
= 0xa0; /* Do not pop r14. */
4206 add_unwind_opcode (op
, 1);
4213 op
= 0xb100 | (range
& 0xf);
4214 add_unwind_opcode (op
, 2);
4217 /* Record the number of bytes pushed. */
4218 for (n
= 0; n
< 16; n
++)
4220 if (range
& (1 << n
))
4221 unwind
.frame_size
+= 4;
4226 /* Parse a directive saving FPA registers. */
4229 s_arm_unwind_save_fpa (int reg
)
4235 /* Get Number of registers to transfer. */
4236 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4239 exp
.X_op
= O_illegal
;
4241 if (exp
.X_op
!= O_constant
)
4243 as_bad (_("expected , <constant>"));
4244 ignore_rest_of_line ();
4248 num_regs
= exp
.X_add_number
;
4250 if (num_regs
< 1 || num_regs
> 4)
4252 as_bad (_("number of registers must be in the range [1:4]"));
4253 ignore_rest_of_line ();
4257 demand_empty_rest_of_line ();
4262 op
= 0xb4 | (num_regs
- 1);
4263 add_unwind_opcode (op
, 1);
4268 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4269 add_unwind_opcode (op
, 2);
4271 unwind
.frame_size
+= num_regs
* 12;
4275 /* Parse a directive saving VFP registers for ARMv6 and above. */
4278 s_arm_unwind_save_vfp_armv6 (void)
4283 int num_vfpv3_regs
= 0;
4284 int num_regs_below_16
;
4285 bfd_boolean partial_match
;
4287 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
,
4291 as_bad (_("expected register list"));
4292 ignore_rest_of_line ();
4296 demand_empty_rest_of_line ();
4298 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4299 than FSTMX/FLDMX-style ones). */
4301 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4303 num_vfpv3_regs
= count
;
4304 else if (start
+ count
> 16)
4305 num_vfpv3_regs
= start
+ count
- 16;
4307 if (num_vfpv3_regs
> 0)
4309 int start_offset
= start
> 16 ? start
- 16 : 0;
4310 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4311 add_unwind_opcode (op
, 2);
4314 /* Generate opcode for registers numbered in the range 0 .. 15. */
4315 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4316 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4317 if (num_regs_below_16
> 0)
4319 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4320 add_unwind_opcode (op
, 2);
4323 unwind
.frame_size
+= count
* 8;
4327 /* Parse a directive saving VFP registers for pre-ARMv6. */
4330 s_arm_unwind_save_vfp (void)
4335 bfd_boolean partial_match
;
4337 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
,
4341 as_bad (_("expected register list"));
4342 ignore_rest_of_line ();
4346 demand_empty_rest_of_line ();
4351 op
= 0xb8 | (count
- 1);
4352 add_unwind_opcode (op
, 1);
4357 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4358 add_unwind_opcode (op
, 2);
4360 unwind
.frame_size
+= count
* 8 + 4;
4364 /* Parse a directive saving iWMMXt data registers. */
4367 s_arm_unwind_save_mmxwr (void)
4375 if (*input_line_pointer
== '{')
4376 input_line_pointer
++;
4380 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4384 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4389 as_tsktsk (_("register list not in ascending order"));
4392 if (*input_line_pointer
== '-')
4394 input_line_pointer
++;
4395 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4398 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4401 else if (reg
>= hi_reg
)
4403 as_bad (_("bad register range"));
4406 for (; reg
< hi_reg
; reg
++)
4410 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4412 skip_past_char (&input_line_pointer
, '}');
4414 demand_empty_rest_of_line ();
4416 /* Generate any deferred opcodes because we're going to be looking at
4418 flush_pending_unwind ();
4420 for (i
= 0; i
< 16; i
++)
4422 if (mask
& (1 << i
))
4423 unwind
.frame_size
+= 8;
4426 /* Attempt to combine with a previous opcode. We do this because gcc
4427 likes to output separate unwind directives for a single block of
4429 if (unwind
.opcode_count
> 0)
4431 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4432 if ((i
& 0xf8) == 0xc0)
4435 /* Only merge if the blocks are contiguous. */
4438 if ((mask
& 0xfe00) == (1 << 9))
4440 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4441 unwind
.opcode_count
--;
4444 else if (i
== 6 && unwind
.opcode_count
>= 2)
4446 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4450 op
= 0xffff << (reg
- 1);
4452 && ((mask
& op
) == (1u << (reg
- 1))))
4454 op
= (1 << (reg
+ i
+ 1)) - 1;
4455 op
&= ~((1 << reg
) - 1);
4457 unwind
.opcode_count
-= 2;
4464 /* We want to generate opcodes in the order the registers have been
4465 saved, ie. descending order. */
4466 for (reg
= 15; reg
>= -1; reg
--)
4468 /* Save registers in blocks. */
4470 || !(mask
& (1 << reg
)))
4472 /* We found an unsaved reg. Generate opcodes to save the
4479 op
= 0xc0 | (hi_reg
- 10);
4480 add_unwind_opcode (op
, 1);
4485 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4486 add_unwind_opcode (op
, 2);
4495 ignore_rest_of_line ();
4499 s_arm_unwind_save_mmxwcg (void)
4506 if (*input_line_pointer
== '{')
4507 input_line_pointer
++;
4509 skip_whitespace (input_line_pointer
);
4513 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4517 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4523 as_tsktsk (_("register list not in ascending order"));
4526 if (*input_line_pointer
== '-')
4528 input_line_pointer
++;
4529 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4532 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4535 else if (reg
>= hi_reg
)
4537 as_bad (_("bad register range"));
4540 for (; reg
< hi_reg
; reg
++)
4544 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4546 skip_past_char (&input_line_pointer
, '}');
4548 demand_empty_rest_of_line ();
4550 /* Generate any deferred opcodes because we're going to be looking at
4552 flush_pending_unwind ();
4554 for (reg
= 0; reg
< 16; reg
++)
4556 if (mask
& (1 << reg
))
4557 unwind
.frame_size
+= 4;
4560 add_unwind_opcode (op
, 2);
4563 ignore_rest_of_line ();
4567 /* Parse an unwind_save directive.
4568 If the argument is non-zero, this is a .vsave directive. */
4571 s_arm_unwind_save (int arch_v6
)
4574 struct reg_entry
*reg
;
4575 bfd_boolean had_brace
= FALSE
;
4577 if (!unwind
.proc_start
)
4578 as_bad (MISSING_FNSTART
);
4580 /* Figure out what sort of save we have. */
4581 peek
= input_line_pointer
;
4589 reg
= arm_reg_parse_multi (&peek
);
4593 as_bad (_("register expected"));
4594 ignore_rest_of_line ();
4603 as_bad (_("FPA .unwind_save does not take a register list"));
4604 ignore_rest_of_line ();
4607 input_line_pointer
= peek
;
4608 s_arm_unwind_save_fpa (reg
->number
);
4612 s_arm_unwind_save_core ();
4617 s_arm_unwind_save_vfp_armv6 ();
4619 s_arm_unwind_save_vfp ();
4622 case REG_TYPE_MMXWR
:
4623 s_arm_unwind_save_mmxwr ();
4626 case REG_TYPE_MMXWCG
:
4627 s_arm_unwind_save_mmxwcg ();
4631 as_bad (_(".unwind_save does not support this kind of register"));
4632 ignore_rest_of_line ();
4637 /* Parse an unwind_movsp directive. */
4640 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4646 if (!unwind
.proc_start
)
4647 as_bad (MISSING_FNSTART
);
4649 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4652 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4653 ignore_rest_of_line ();
4657 /* Optional constant. */
4658 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4660 if (immediate_for_directive (&offset
) == FAIL
)
4666 demand_empty_rest_of_line ();
4668 if (reg
== REG_SP
|| reg
== REG_PC
)
4670 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4674 if (unwind
.fp_reg
!= REG_SP
)
4675 as_bad (_("unexpected .unwind_movsp directive"));
4677 /* Generate opcode to restore the value. */
4679 add_unwind_opcode (op
, 1);
4681 /* Record the information for later. */
4682 unwind
.fp_reg
= reg
;
4683 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4684 unwind
.sp_restored
= 1;
4687 /* Parse an unwind_pad directive. */
4690 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4694 if (!unwind
.proc_start
)
4695 as_bad (MISSING_FNSTART
);
4697 if (immediate_for_directive (&offset
) == FAIL
)
4702 as_bad (_("stack increment must be multiple of 4"));
4703 ignore_rest_of_line ();
4707 /* Don't generate any opcodes, just record the details for later. */
4708 unwind
.frame_size
+= offset
;
4709 unwind
.pending_offset
+= offset
;
4711 demand_empty_rest_of_line ();
4714 /* Parse an unwind_setfp directive. */
4717 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4723 if (!unwind
.proc_start
)
4724 as_bad (MISSING_FNSTART
);
4726 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4727 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4730 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4732 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4734 as_bad (_("expected <reg>, <reg>"));
4735 ignore_rest_of_line ();
4739 /* Optional constant. */
4740 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4742 if (immediate_for_directive (&offset
) == FAIL
)
4748 demand_empty_rest_of_line ();
4750 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4752 as_bad (_("register must be either sp or set by a previous"
4753 "unwind_movsp directive"));
4757 /* Don't generate any opcodes, just record the information for later. */
4758 unwind
.fp_reg
= fp_reg
;
4760 if (sp_reg
== REG_SP
)
4761 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4763 unwind
.fp_offset
-= offset
;
4766 /* Parse an unwind_raw directive. */
4769 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4772 /* This is an arbitrary limit. */
4773 unsigned char op
[16];
4776 if (!unwind
.proc_start
)
4777 as_bad (MISSING_FNSTART
);
4780 if (exp
.X_op
== O_constant
4781 && skip_past_comma (&input_line_pointer
) != FAIL
)
4783 unwind
.frame_size
+= exp
.X_add_number
;
4787 exp
.X_op
= O_illegal
;
4789 if (exp
.X_op
!= O_constant
)
4791 as_bad (_("expected <offset>, <opcode>"));
4792 ignore_rest_of_line ();
4798 /* Parse the opcode. */
4803 as_bad (_("unwind opcode too long"));
4804 ignore_rest_of_line ();
4806 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4808 as_bad (_("invalid unwind opcode"));
4809 ignore_rest_of_line ();
4812 op
[count
++] = exp
.X_add_number
;
4814 /* Parse the next byte. */
4815 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4821 /* Add the opcode bytes in reverse order. */
4823 add_unwind_opcode (op
[count
], 1);
4825 demand_empty_rest_of_line ();
4829 /* Parse a .eabi_attribute directive. */
4832 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4834 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4836 if (tag
>= 0 && tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4837 attributes_set_explicitly
[tag
] = 1;
4840 /* Emit a tls fix for the symbol. */
4843 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4847 #ifdef md_flush_pending_output
4848 md_flush_pending_output ();
4851 #ifdef md_cons_align
4855 /* Since we're just labelling the code, there's no need to define a
4858 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4859 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4860 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4861 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4863 #endif /* OBJ_ELF */
4865 static void s_arm_arch (int);
4866 static void s_arm_object_arch (int);
4867 static void s_arm_cpu (int);
4868 static void s_arm_fpu (int);
4869 static void s_arm_arch_extension (int);
4874 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4881 if (exp
.X_op
== O_symbol
)
4882 exp
.X_op
= O_secrel
;
4884 emit_expr (&exp
, 4);
4886 while (*input_line_pointer
++ == ',');
4888 input_line_pointer
--;
4889 demand_empty_rest_of_line ();
4893 /* This table describes all the machine specific pseudo-ops the assembler
4894 has to support. The fields are:
4895 pseudo-op name without dot
4896 function to call to execute this pseudo-op
4897 Integer arg to pass to the function. */
4899 const pseudo_typeS md_pseudo_table
[] =
4901 /* Never called because '.req' does not start a line. */
4902 { "req", s_req
, 0 },
4903 /* Following two are likewise never called. */
4906 { "unreq", s_unreq
, 0 },
4907 { "bss", s_bss
, 0 },
4908 { "align", s_align_ptwo
, 2 },
4909 { "arm", s_arm
, 0 },
4910 { "thumb", s_thumb
, 0 },
4911 { "code", s_code
, 0 },
4912 { "force_thumb", s_force_thumb
, 0 },
4913 { "thumb_func", s_thumb_func
, 0 },
4914 { "thumb_set", s_thumb_set
, 0 },
4915 { "even", s_even
, 0 },
4916 { "ltorg", s_ltorg
, 0 },
4917 { "pool", s_ltorg
, 0 },
4918 { "syntax", s_syntax
, 0 },
4919 { "cpu", s_arm_cpu
, 0 },
4920 { "arch", s_arm_arch
, 0 },
4921 { "object_arch", s_arm_object_arch
, 0 },
4922 { "fpu", s_arm_fpu
, 0 },
4923 { "arch_extension", s_arm_arch_extension
, 0 },
4925 { "word", s_arm_elf_cons
, 4 },
4926 { "long", s_arm_elf_cons
, 4 },
4927 { "inst.n", s_arm_elf_inst
, 2 },
4928 { "inst.w", s_arm_elf_inst
, 4 },
4929 { "inst", s_arm_elf_inst
, 0 },
4930 { "rel31", s_arm_rel31
, 0 },
4931 { "fnstart", s_arm_unwind_fnstart
, 0 },
4932 { "fnend", s_arm_unwind_fnend
, 0 },
4933 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4934 { "personality", s_arm_unwind_personality
, 0 },
4935 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4936 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4937 { "save", s_arm_unwind_save
, 0 },
4938 { "vsave", s_arm_unwind_save
, 1 },
4939 { "movsp", s_arm_unwind_movsp
, 0 },
4940 { "pad", s_arm_unwind_pad
, 0 },
4941 { "setfp", s_arm_unwind_setfp
, 0 },
4942 { "unwind_raw", s_arm_unwind_raw
, 0 },
4943 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4944 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4948 /* These are used for dwarf. */
4952 /* These are used for dwarf2. */
4953 { "file", dwarf2_directive_file
, 0 },
4954 { "loc", dwarf2_directive_loc
, 0 },
4955 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4957 { "extend", float_cons
, 'x' },
4958 { "ldouble", float_cons
, 'x' },
4959 { "packed", float_cons
, 'p' },
4961 {"secrel32", pe_directive_secrel
, 0},
4964 /* These are for compatibility with CodeComposer Studio. */
4965 {"ref", s_ccs_ref
, 0},
4966 {"def", s_ccs_def
, 0},
4967 {"asmfunc", s_ccs_asmfunc
, 0},
4968 {"endasmfunc", s_ccs_endasmfunc
, 0},
4973 /* Parser functions used exclusively in instruction operands. */
4975 /* Generic immediate-value read function for use in insn parsing.
4976 STR points to the beginning of the immediate (the leading #);
4977 VAL receives the value; if the value is outside [MIN, MAX]
4978 issue an error. PREFIX_OPT is true if the immediate prefix is
4982 parse_immediate (char **str
, int *val
, int min
, int max
,
4983 bfd_boolean prefix_opt
)
4987 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4988 if (exp
.X_op
!= O_constant
)
4990 inst
.error
= _("constant expression required");
4994 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4996 inst
.error
= _("immediate value out of range");
5000 *val
= exp
.X_add_number
;
5004 /* Less-generic immediate-value read function with the possibility of loading a
5005 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
5006 instructions. Puts the result directly in inst.operands[i]. */
5009 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
5010 bfd_boolean allow_symbol_p
)
5013 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
5016 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
5018 if (exp_p
->X_op
== O_constant
)
5020 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
5021 /* If we're on a 64-bit host, then a 64-bit number can be returned using
5022 O_constant. We have to be careful not to break compilation for
5023 32-bit X_add_number, though. */
5024 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
5026 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
5027 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
5029 inst
.operands
[i
].regisimm
= 1;
5032 else if (exp_p
->X_op
== O_big
5033 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
5035 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
5037 /* Bignums have their least significant bits in
5038 generic_bignum[0]. Make sure we put 32 bits in imm and
5039 32 bits in reg, in a (hopefully) portable way. */
5040 gas_assert (parts
!= 0);
5042 /* Make sure that the number is not too big.
5043 PR 11972: Bignums can now be sign-extended to the
5044 size of a .octa so check that the out of range bits
5045 are all zero or all one. */
5046 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
5048 LITTLENUM_TYPE m
= -1;
5050 if (generic_bignum
[parts
* 2] != 0
5051 && generic_bignum
[parts
* 2] != m
)
5054 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
5055 if (generic_bignum
[j
] != generic_bignum
[j
-1])
5059 inst
.operands
[i
].imm
= 0;
5060 for (j
= 0; j
< parts
; j
++, idx
++)
5061 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
5062 << (LITTLENUM_NUMBER_OF_BITS
* j
);
5063 inst
.operands
[i
].reg
= 0;
5064 for (j
= 0; j
< parts
; j
++, idx
++)
5065 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
5066 << (LITTLENUM_NUMBER_OF_BITS
* j
);
5067 inst
.operands
[i
].regisimm
= 1;
5069 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
5077 /* Returns the pseudo-register number of an FPA immediate constant,
5078 or FAIL if there isn't a valid constant here. */
5081 parse_fpa_immediate (char ** str
)
5083 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5089 /* First try and match exact strings, this is to guarantee
5090 that some formats will work even for cross assembly. */
5092 for (i
= 0; fp_const
[i
]; i
++)
5094 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
5098 *str
+= strlen (fp_const
[i
]);
5099 if (is_end_of_line
[(unsigned char) **str
])
5105 /* Just because we didn't get a match doesn't mean that the constant
5106 isn't valid, just that it is in a format that we don't
5107 automatically recognize. Try parsing it with the standard
5108 expression routines. */
5110 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
5112 /* Look for a raw floating point number. */
5113 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
5114 && is_end_of_line
[(unsigned char) *save_in
])
5116 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5118 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5120 if (words
[j
] != fp_values
[i
][j
])
5124 if (j
== MAX_LITTLENUMS
)
5132 /* Try and parse a more complex expression, this will probably fail
5133 unless the code uses a floating point prefix (eg "0f"). */
5134 save_in
= input_line_pointer
;
5135 input_line_pointer
= *str
;
5136 if (expression (&exp
) == absolute_section
5137 && exp
.X_op
== O_big
5138 && exp
.X_add_number
< 0)
5140 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
5142 #define X_PRECISION 5
5143 #define E_PRECISION 15L
5144 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
5146 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5148 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5150 if (words
[j
] != fp_values
[i
][j
])
5154 if (j
== MAX_LITTLENUMS
)
5156 *str
= input_line_pointer
;
5157 input_line_pointer
= save_in
;
5164 *str
= input_line_pointer
;
5165 input_line_pointer
= save_in
;
5166 inst
.error
= _("invalid FPA immediate expression");
5170 /* Returns 1 if a number has "quarter-precision" float format
5171 0baBbbbbbc defgh000 00000000 00000000. */
static int
is_quarter_float (unsigned imm)
{
  /* The expected repeated-exponent pattern depends on bit 29: the five
     bits 25..29 must all equal bit 29, and bit 30 must be its inverse.  */
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  /* Low 19 bits must be zero; bits 25..30 must match the pattern.  */
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
5181 /* Detect the presence of a floating point or integer zero constant,
5185 parse_ifimm_zero (char **in
)
5189 if (!is_immediate_prefix (**in
))
5191 /* In unified syntax, all prefixes are optional. */
5192 if (!unified_syntax
)
5198 /* Accept #0x0 as a synonym for #0. */
5199 if (strncmp (*in
, "0x", 2) == 0)
5202 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
5207 error_code
= atof_generic (in
, ".", EXP_CHARS
,
5208 &generic_floating_point_number
);
5211 && generic_floating_point_number
.sign
== '+'
5212 && (generic_floating_point_number
.low
5213 > generic_floating_point_number
.leader
))
5219 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5220 0baBbbbbbc defgh000 00000000 00000000.
5221 The zero and minus-zero cases need special handling, since they can't be
5222 encoded in the "quarter-precision" float format, but can nonetheless be
5223 loaded as integer constants. */
5226 parse_qfloat_immediate (char **ccp
, int *immed
)
5230 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5231 int found_fpchar
= 0;
5233 skip_past_char (&str
, '#');
5235 /* We must not accidentally parse an integer as a floating-point number. Make
5236 sure that the value we parse is not an integer by checking for special
5237 characters '.' or 'e'.
5238 FIXME: This is a horrible hack, but doing better is tricky because type
5239 information isn't in a very usable state at parse time. */
5241 skip_whitespace (fpnum
);
5243 if (strncmp (fpnum
, "0x", 2) == 0)
5247 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5248 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5258 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5260 unsigned fpword
= 0;
5263 /* Our FP word must be 32 bits (single-precision FP). */
5264 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5266 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5270 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5283 /* Shift operands. */
5286 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5289 struct asm_shift_name
5292 enum shift_kind kind
;
5295 /* Third argument to parse_shift. */
5296 enum parse_shift_mode
5298 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5299 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5300 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5301 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5302 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5305 /* Parse a <shift> specifier on an ARM data processing instruction.
5306 This has three forms:
5308 (LSL|LSR|ASL|ASR|ROR) Rs
5309 (LSL|LSR|ASL|ASR|ROR) #imm
5312 Note that ASL is assimilated to LSL in the instruction encoding, and
5313 RRX to ROR #0 (which cannot be written as such). */
5316 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5318 const struct asm_shift_name
*shift_name
;
5319 enum shift_kind shift
;
5324 for (p
= *str
; ISALPHA (*p
); p
++)
5329 inst
.error
= _("shift expression expected");
5333 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5336 if (shift_name
== NULL
)
5338 inst
.error
= _("shift expression expected");
5342 shift
= shift_name
->kind
;
5346 case NO_SHIFT_RESTRICT
:
5347 case SHIFT_IMMEDIATE
: break;
5349 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5350 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5352 inst
.error
= _("'LSL' or 'ASR' required");
5357 case SHIFT_LSL_IMMEDIATE
:
5358 if (shift
!= SHIFT_LSL
)
5360 inst
.error
= _("'LSL' required");
5365 case SHIFT_ASR_IMMEDIATE
:
5366 if (shift
!= SHIFT_ASR
)
5368 inst
.error
= _("'ASR' required");
5376 if (shift
!= SHIFT_RRX
)
5378 /* Whitespace can appear here if the next thing is a bare digit. */
5379 skip_whitespace (p
);
5381 if (mode
== NO_SHIFT_RESTRICT
5382 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5384 inst
.operands
[i
].imm
= reg
;
5385 inst
.operands
[i
].immisreg
= 1;
5387 else if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5390 inst
.operands
[i
].shift_kind
= shift
;
5391 inst
.operands
[i
].shifted
= 1;
5396 /* Parse a <shifter_operand> for an ARM data processing instruction:
5399 #<immediate>, <rotate>
5403 where <shift> is defined by parse_shift above, and <rotate> is a
5404 multiple of 2 between 0 and 30. Validation of immediate operands
5405 is deferred to md_apply_fix. */
5408 parse_shifter_operand (char **str
, int i
)
5413 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5415 inst
.operands
[i
].reg
= value
;
5416 inst
.operands
[i
].isreg
= 1;
5418 /* parse_shift will override this if appropriate */
5419 inst
.relocs
[0].exp
.X_op
= O_constant
;
5420 inst
.relocs
[0].exp
.X_add_number
= 0;
5422 if (skip_past_comma (str
) == FAIL
)
5425 /* Shift operation on register. */
5426 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5429 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_IMM_PREFIX
))
5432 if (skip_past_comma (str
) == SUCCESS
)
5434 /* #x, y -- ie explicit rotation by Y. */
5435 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5438 if (exp
.X_op
!= O_constant
|| inst
.relocs
[0].exp
.X_op
!= O_constant
)
5440 inst
.error
= _("constant expression expected");
5444 value
= exp
.X_add_number
;
5445 if (value
< 0 || value
> 30 || value
% 2 != 0)
5447 inst
.error
= _("invalid rotation");
5450 if (inst
.relocs
[0].exp
.X_add_number
< 0
5451 || inst
.relocs
[0].exp
.X_add_number
> 255)
5453 inst
.error
= _("invalid constant");
5457 /* Encode as specified. */
5458 inst
.operands
[i
].imm
= inst
.relocs
[0].exp
.X_add_number
| value
<< 7;
5462 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
5463 inst
.relocs
[0].pc_rel
= 0;
5467 /* Group relocation information. Each entry in the table contains the
5468 textual name of the relocation as may appear in assembler source
5469 and must end with a colon.
5470 Along with this textual name are the relocation codes to be used if
5471 the corresponding instruction is an ALU instruction (ADD or SUB only),
5472 an LDR, an LDRS, or an LDC. */
5474 struct group_reloc_table_entry
5485 /* Varieties of non-ALU group relocation. */
5492 static struct group_reloc_table_entry group_reloc_table
[] =
5493 { /* Program counter relative: */
5495 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5500 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5501 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5502 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5503 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5505 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5510 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5511 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5512 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5513 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5515 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5516 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5517 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5518 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5519 /* Section base relative */
5521 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5526 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5527 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5528 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5529 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5531 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5536 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5537 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5538 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5539 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5541 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5542 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5543 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5544 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5545 /* Absolute thumb alu relocations. */
5547 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5552 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5557 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5562 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5567 /* Given the address of a pointer pointing to the textual name of a group
5568 relocation as may appear in assembler source, attempt to find its details
5569 in group_reloc_table. The pointer will be updated to the character after
5570 the trailing colon. On failure, FAIL will be returned; SUCCESS
5571 otherwise. On success, *entry will be updated to point at the relevant
5572 group_reloc_table entry. */
5575 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5578 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5580 int length
= strlen (group_reloc_table
[i
].name
);
5582 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5583 && (*str
)[length
] == ':')
5585 *out
= &group_reloc_table
[i
];
5586 *str
+= (length
+ 1);
5594 /* Parse a <shifter_operand> for an ARM data processing instruction
5595 (as for parse_shifter_operand) where group relocations are allowed:
5598 #<immediate>, <rotate>
5599 #:<group_reloc>:<expression>
5603 where <group_reloc> is one of the strings defined in group_reloc_table.
5604 The hashes are optional.
5606 Everything else is as for parse_shifter_operand. */
5608 static parse_operand_result
5609 parse_shifter_operand_group_reloc (char **str
, int i
)
5611 /* Determine if we have the sequence of characters #: or just :
5612 coming next. If we do, then we check for a group relocation.
5613 If we don't, punt the whole lot to parse_shifter_operand. */
5615 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5616 || (*str
)[0] == ':')
5618 struct group_reloc_table_entry
*entry
;
5620 if ((*str
)[0] == '#')
5625 /* Try to parse a group relocation. Anything else is an error. */
5626 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5628 inst
.error
= _("unknown group relocation");
5629 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5632 /* We now have the group relocation table entry corresponding to
5633 the name in the assembler source. Next, we parse the expression. */
5634 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_NO_PREFIX
))
5635 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5637 /* Record the relocation type (always the ALU variant here). */
5638 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5639 gas_assert (inst
.relocs
[0].type
!= 0);
5641 return PARSE_OPERAND_SUCCESS
;
5644 return parse_shifter_operand (str
, i
) == SUCCESS
5645 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5647 /* Never reached. */
5650 /* Parse a Neon alignment expression. Information is written to
5651 inst.operands[i]. We assume the initial ':' has been skipped.
5653 align .imm = align << 8, .immisalign=1, .preind=0 */
5654 static parse_operand_result
5655 parse_neon_alignment (char **str
, int i
)
5660 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5662 if (exp
.X_op
!= O_constant
)
5664 inst
.error
= _("alignment must be constant");
5665 return PARSE_OPERAND_FAIL
;
5668 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5669 inst
.operands
[i
].immisalign
= 1;
5670 /* Alignments are not pre-indexes. */
5671 inst
.operands
[i
].preind
= 0;
5674 return PARSE_OPERAND_SUCCESS
;
5677 /* Parse all forms of an ARM address expression. Information is written
5678 to inst.operands[i] and/or inst.relocs[0].
5680 Preindexed addressing (.preind=1):
5682 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5683 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5684 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5685 .shift_kind=shift .relocs[0].exp=shift_imm
5687 These three may have a trailing ! which causes .writeback to be set also.
5689 Postindexed addressing (.postind=1, .writeback=1):
5691 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5692 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5693 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5694 .shift_kind=shift .relocs[0].exp=shift_imm
5696 Unindexed addressing (.preind=0, .postind=0):
5698 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5702 [Rn]{!} shorthand for [Rn,#0]{!}
5703 =immediate .isreg=0 .relocs[0].exp=immediate
5704 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5706 It is the caller's responsibility to check for addressing modes not
5707 supported by the instruction, and to set inst.relocs[0].type. */
5709 static parse_operand_result
5710 parse_address_main (char **str
, int i
, int group_relocations
,
5711 group_reloc_type group_type
)
5716 if (skip_past_char (&p
, '[') == FAIL
)
5718 if (skip_past_char (&p
, '=') == FAIL
)
5720 /* Bare address - translate to PC-relative offset. */
5721 inst
.relocs
[0].pc_rel
= 1;
5722 inst
.operands
[i
].reg
= REG_PC
;
5723 inst
.operands
[i
].isreg
= 1;
5724 inst
.operands
[i
].preind
= 1;
5726 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_OPT_PREFIX_BIG
))
5727 return PARSE_OPERAND_FAIL
;
5729 else if (parse_big_immediate (&p
, i
, &inst
.relocs
[0].exp
,
5730 /*allow_symbol_p=*/TRUE
))
5731 return PARSE_OPERAND_FAIL
;
5734 return PARSE_OPERAND_SUCCESS
;
5737 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5738 skip_whitespace (p
);
5740 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5742 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5743 return PARSE_OPERAND_FAIL
;
5745 inst
.operands
[i
].reg
= reg
;
5746 inst
.operands
[i
].isreg
= 1;
5748 if (skip_past_comma (&p
) == SUCCESS
)
5750 inst
.operands
[i
].preind
= 1;
5753 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5755 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5757 inst
.operands
[i
].imm
= reg
;
5758 inst
.operands
[i
].immisreg
= 1;
5760 if (skip_past_comma (&p
) == SUCCESS
)
5761 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5762 return PARSE_OPERAND_FAIL
;
5764 else if (skip_past_char (&p
, ':') == SUCCESS
)
5766 /* FIXME: '@' should be used here, but it's filtered out by generic
5767 code before we get to see it here. This may be subject to
5769 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5771 if (result
!= PARSE_OPERAND_SUCCESS
)
5776 if (inst
.operands
[i
].negative
)
5778 inst
.operands
[i
].negative
= 0;
5782 if (group_relocations
5783 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5785 struct group_reloc_table_entry
*entry
;
5787 /* Skip over the #: or : sequence. */
5793 /* Try to parse a group relocation. Anything else is an
5795 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5797 inst
.error
= _("unknown group relocation");
5798 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5801 /* We now have the group relocation table entry corresponding to
5802 the name in the assembler source. Next, we parse the
5804 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5805 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5807 /* Record the relocation type. */
5812 = (bfd_reloc_code_real_type
) entry
->ldr_code
;
5817 = (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5822 = (bfd_reloc_code_real_type
) entry
->ldc_code
;
5829 if (inst
.relocs
[0].type
== 0)
5831 inst
.error
= _("this group relocation is not allowed on this instruction");
5832 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5839 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5840 return PARSE_OPERAND_FAIL
;
5841 /* If the offset is 0, find out if it's a +0 or -0. */
5842 if (inst
.relocs
[0].exp
.X_op
== O_constant
5843 && inst
.relocs
[0].exp
.X_add_number
== 0)
5845 skip_whitespace (q
);
5849 skip_whitespace (q
);
5852 inst
.operands
[i
].negative
= 1;
5857 else if (skip_past_char (&p
, ':') == SUCCESS
)
5859 /* FIXME: '@' should be used here, but it's filtered out by generic code
5860 before we get to see it here. This may be subject to change. */
5861 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5863 if (result
!= PARSE_OPERAND_SUCCESS
)
5867 if (skip_past_char (&p
, ']') == FAIL
)
5869 inst
.error
= _("']' expected");
5870 return PARSE_OPERAND_FAIL
;
5873 if (skip_past_char (&p
, '!') == SUCCESS
)
5874 inst
.operands
[i
].writeback
= 1;
5876 else if (skip_past_comma (&p
) == SUCCESS
)
5878 if (skip_past_char (&p
, '{') == SUCCESS
)
5880 /* [Rn], {expr} - unindexed, with option */
5881 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5882 0, 255, TRUE
) == FAIL
)
5883 return PARSE_OPERAND_FAIL
;
5885 if (skip_past_char (&p
, '}') == FAIL
)
5887 inst
.error
= _("'}' expected at end of 'option' field");
5888 return PARSE_OPERAND_FAIL
;
5890 if (inst
.operands
[i
].preind
)
5892 inst
.error
= _("cannot combine index with option");
5893 return PARSE_OPERAND_FAIL
;
5896 return PARSE_OPERAND_SUCCESS
;
5900 inst
.operands
[i
].postind
= 1;
5901 inst
.operands
[i
].writeback
= 1;
5903 if (inst
.operands
[i
].preind
)
5905 inst
.error
= _("cannot combine pre- and post-indexing");
5906 return PARSE_OPERAND_FAIL
;
5910 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5912 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5914 /* We might be using the immediate for alignment already. If we
5915 are, OR the register number into the low-order bits. */
5916 if (inst
.operands
[i
].immisalign
)
5917 inst
.operands
[i
].imm
|= reg
;
5919 inst
.operands
[i
].imm
= reg
;
5920 inst
.operands
[i
].immisreg
= 1;
5922 if (skip_past_comma (&p
) == SUCCESS
)
5923 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5924 return PARSE_OPERAND_FAIL
;
5930 if (inst
.operands
[i
].negative
)
5932 inst
.operands
[i
].negative
= 0;
5935 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5936 return PARSE_OPERAND_FAIL
;
5937 /* If the offset is 0, find out if it's a +0 or -0. */
5938 if (inst
.relocs
[0].exp
.X_op
== O_constant
5939 && inst
.relocs
[0].exp
.X_add_number
== 0)
5941 skip_whitespace (q
);
5945 skip_whitespace (q
);
5948 inst
.operands
[i
].negative
= 1;
5954 /* If at this point neither .preind nor .postind is set, we have a
5955 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5956 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5958 inst
.operands
[i
].preind
= 1;
5959 inst
.relocs
[0].exp
.X_op
= O_constant
;
5960 inst
.relocs
[0].exp
.X_add_number
= 0;
5963 return PARSE_OPERAND_SUCCESS
;
5967 parse_address (char **str
, int i
)
5969 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5973 static parse_operand_result
5974 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5976 return parse_address_main (str
, i
, 1, type
);
5979 /* Parse an operand for a MOVW or MOVT instruction. */
5981 parse_half (char **str
)
5986 skip_past_char (&p
, '#');
5987 if (strncasecmp (p
, ":lower16:", 9) == 0)
5988 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVW
;
5989 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5990 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVT
;
5992 if (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
)
5995 skip_whitespace (p
);
5998 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
6001 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
6003 if (inst
.relocs
[0].exp
.X_op
!= O_constant
)
6005 inst
.error
= _("constant expression expected");
6008 if (inst
.relocs
[0].exp
.X_add_number
< 0
6009 || inst
.relocs
[0].exp
.X_add_number
> 0xffff)
6011 inst
.error
= _("immediate value out of range");
6019 /* Miscellaneous. */
6021 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
6022 or a bitmask suitable to be or-ed into the ARM msr instruction. */
6024 parse_psr (char **str
, bfd_boolean lhs
)
6027 unsigned long psr_field
;
6028 const struct asm_psr
*psr
;
6030 bfd_boolean is_apsr
= FALSE
;
6031 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
6033 /* PR gas/12698: If the user has specified -march=all then m_profile will
6034 be TRUE, but we want to ignore it in this case as we are building for any
6035 CPU type, including non-m variants. */
6036 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
6039 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
6040 feature for ease of use and backwards compatibility. */
6042 if (strncasecmp (p
, "SPSR", 4) == 0)
6045 goto unsupported_psr
;
6047 psr_field
= SPSR_BIT
;
6049 else if (strncasecmp (p
, "CPSR", 4) == 0)
6052 goto unsupported_psr
;
6056 else if (strncasecmp (p
, "APSR", 4) == 0)
6058 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
6059 and ARMv7-R architecture CPUs. */
6068 while (ISALNUM (*p
) || *p
== '_');
6070 if (strncasecmp (start
, "iapsr", 5) == 0
6071 || strncasecmp (start
, "eapsr", 5) == 0
6072 || strncasecmp (start
, "xpsr", 4) == 0
6073 || strncasecmp (start
, "psr", 3) == 0)
6074 p
= start
+ strcspn (start
, "rR") + 1;
6076 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
6082 /* If APSR is being written, a bitfield may be specified. Note that
6083 APSR itself is handled above. */
6084 if (psr
->field
<= 3)
6086 psr_field
= psr
->field
;
6092 /* M-profile MSR instructions have the mask field set to "10", except
6093 *PSR variants which modify APSR, which may use a different mask (and
6094 have been handled already). Do that by setting the PSR_f field
6096 return psr
->field
| (lhs
? PSR_f
: 0);
6099 goto unsupported_psr
;
6105 /* A suffix follows. */
6111 while (ISALNUM (*p
) || *p
== '_');
6115 /* APSR uses a notation for bits, rather than fields. */
6116 unsigned int nzcvq_bits
= 0;
6117 unsigned int g_bit
= 0;
6120 for (bit
= start
; bit
!= p
; bit
++)
6122 switch (TOLOWER (*bit
))
6125 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
6129 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
6133 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
6137 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
6141 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
6145 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
6149 inst
.error
= _("unexpected bit specified after APSR");
6154 if (nzcvq_bits
== 0x1f)
6159 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
6161 inst
.error
= _("selected processor does not "
6162 "support DSP extension");
6169 if ((nzcvq_bits
& 0x20) != 0
6170 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
6171 || (g_bit
& 0x2) != 0)
6173 inst
.error
= _("bad bitmask specified after APSR");
6179 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
6184 psr_field
|= psr
->field
;
6190 goto error
; /* Garbage after "[CS]PSR". */
6192 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
6193 is deprecated, but allow it anyway. */
6197 as_tsktsk (_("writing to APSR without specifying a bitmask is "
6200 else if (!m_profile
)
6201 /* These bits are never right for M-profile devices: don't set them
6202 (only code paths which read/write APSR reach here). */
6203 psr_field
|= (PSR_c
| PSR_f
);
6209 inst
.error
= _("selected processor does not support requested special "
6210 "purpose register");
6214 inst
.error
= _("flag for {c}psr instruction expected");
/* Parse the system-register operand of an Armv8.1-M VLDR/VSTR
   (FPSCR and friends).  Matches the text before the ',' against the
   sysregs[] table and yields regl | (regh << 3), the combined encoding.
   NOTE(review): extraction appears to have dropped lines here — the
   function header/declarations/return and some table rows are missing
   from this view; confirm against the full source.  */
6219 parse_sys_vldr_vstr (char **str
)
/* Table rows: register name, low encoding field, high encoding field.  */
6228 {"FPSCR", 0x1, 0x0},
6229 {"FPSCR_nzcvqc", 0x2, 0x0},
6232 {"FPCXTNS", 0x6, 0x1},
6233 {"FPCXTS", 0x7, 0x1}
/* The operand name runs up to the ',' separating it from the rest of
   the operand list; op_strlen is its length.  NOTE(review): assumes a
   ',' is present — a NULL from strchr is not visibly handled here.  */
6235 char *op_end
= strchr (*str
, ',');
6236 size_t op_strlen
= op_end
- *str
;
/* Linear search of the table for a name match.  */
6238 for (i
= 0; i
< sizeof (sysregs
) / sizeof (sysregs
[0]); i
++)
6240 if (!strncmp (*str
, sysregs
[i
].name
, op_strlen
))
/* Pack the two encoding fields into one value: low bits from .regl,
   bit 3 upward from .regh.  */
6242 val
= sysregs
[i
].regl
| (sysregs
[i
].regh
<< 3);
6251 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6252 value suitable for splatting into the AIF field of the instruction. */
6255 parse_cps_flags (char **str
)
6264 case '\0': case ',':
6267 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6268 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6269 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6272 inst
.error
= _("unrecognized CPS flag");
6277 if (saw_a_flag
== 0)
6279 inst
.error
= _("missing CPS flags");
6287 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6288 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6291 parse_endian_specifier (char **str
)
/* Case-insensitive comparison of the first two characters against "BE",
   then "LE" (strncasecmp returns 0 on a match).  NOTE(review): the
   branch bodies — the little_endian assignments and the error path —
   were lost in extraction; confirm which branch assigns 0 and which 1
   against the full source before relying on this transcription.  */
6296 if (strncasecmp (s
, "BE", 2))
6298 else if (strncasecmp (s
, "LE", 2))
6302 inst
.error
= _("valid endian specifiers are be or le");
/* Reject longer identifiers such as "BEQ" or "LE_": the specifier must
   end after exactly two characters.  */
6306 if (ISALNUM (s
[2]) || s
[2] == '_')
6308 inst
.error
= _("valid endian specifiers are be or le");
6313 return little_endian
;
6316 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6317 value suitable for poking into the rotate field of an sxt or sxta
6318 instruction, or FAIL on error. */
6321 parse_ror (char **str
)
6326 if (strncasecmp (s
, "ROR", 3) == 0)
6330 inst
.error
= _("missing rotation field after comma");
6334 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6339 case 0: *str
= s
; return 0x0;
6340 case 8: *str
= s
; return 0x1;
6341 case 16: *str
= s
; return 0x2;
6342 case 24: *str
= s
; return 0x3;
6345 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6350 /* Parse a conditional code (from conds[] below). The value returned is in the
6351 range 0 .. 14, or FAIL. */
6353 parse_cond (char **str
)
6356 const struct asm_cond
*c
;
6358 /* Condition codes are always 2 characters, so matching up to
6359 3 characters is sufficient. */
6364 while (ISALPHA (*q
) && n
< 3)
6366 cond
[n
] = TOLOWER (*q
);
6371 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6374 inst
.error
= _("condition required");
6382 /* Parse an option for a barrier instruction. Returns the encoding for the
6385 parse_barrier (char **str
)
6388 const struct asm_barrier_opt
*o
;
6391 while (ISALPHA (*q
))
6394 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6399 if (!mark_feature_used (&o
->arch
))
6406 /* Parse the operands of a table branch instruction. Similar to a memory
6409 parse_tb (char **str
)
6414 if (skip_past_char (&p
, '[') == FAIL
)
6416 inst
.error
= _("'[' expected");
6420 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6422 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6425 inst
.operands
[0].reg
= reg
;
6427 if (skip_past_comma (&p
) == FAIL
)
6429 inst
.error
= _("',' expected");
6433 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6435 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6438 inst
.operands
[0].imm
= reg
;
6440 if (skip_past_comma (&p
) == SUCCESS
)
6442 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6444 if (inst
.relocs
[0].exp
.X_add_number
!= 1)
6446 inst
.error
= _("invalid shift");
6449 inst
.operands
[0].shifted
= 1;
6452 if (skip_past_char (&p
, ']') == FAIL
)
6454 inst
.error
= _("']' expected");
6461 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6462 information on the types the operands can take and how they are encoded.
6463 Up to four operands may be read; this function handles setting the
6464 ".present" field for each read operand itself.
6465 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6466 else returns FAIL. */
6469 parse_neon_mov (char **str
, int *which_operand
)
6471 int i
= *which_operand
, val
;
6472 enum arm_reg_type rtype
;
6474 struct neon_type_el optype
;
6476 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6478 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6479 inst
.operands
[i
].reg
= val
;
6480 inst
.operands
[i
].isscalar
= 1;
6481 inst
.operands
[i
].vectype
= optype
;
6482 inst
.operands
[i
++].present
= 1;
6484 if (skip_past_comma (&ptr
) == FAIL
)
6487 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6490 inst
.operands
[i
].reg
= val
;
6491 inst
.operands
[i
].isreg
= 1;
6492 inst
.operands
[i
].present
= 1;
6494 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6497 /* Cases 0, 1, 2, 3, 5 (D only). */
6498 if (skip_past_comma (&ptr
) == FAIL
)
6501 inst
.operands
[i
].reg
= val
;
6502 inst
.operands
[i
].isreg
= 1;
6503 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6504 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6505 inst
.operands
[i
].isvec
= 1;
6506 inst
.operands
[i
].vectype
= optype
;
6507 inst
.operands
[i
++].present
= 1;
6509 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6511 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6512 Case 13: VMOV <Sd>, <Rm> */
6513 inst
.operands
[i
].reg
= val
;
6514 inst
.operands
[i
].isreg
= 1;
6515 inst
.operands
[i
].present
= 1;
6517 if (rtype
== REG_TYPE_NQ
)
6519 first_error (_("can't use Neon quad register here"));
6522 else if (rtype
!= REG_TYPE_VFS
)
6525 if (skip_past_comma (&ptr
) == FAIL
)
6527 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6529 inst
.operands
[i
].reg
= val
;
6530 inst
.operands
[i
].isreg
= 1;
6531 inst
.operands
[i
].present
= 1;
6534 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6537 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6538 Case 1: VMOV<c><q> <Dd>, <Dm>
6539 Case 8: VMOV.F32 <Sd>, <Sm>
6540 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6542 inst
.operands
[i
].reg
= val
;
6543 inst
.operands
[i
].isreg
= 1;
6544 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6545 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6546 inst
.operands
[i
].isvec
= 1;
6547 inst
.operands
[i
].vectype
= optype
;
6548 inst
.operands
[i
].present
= 1;
6550 if (skip_past_comma (&ptr
) == SUCCESS
)
6555 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6558 inst
.operands
[i
].reg
= val
;
6559 inst
.operands
[i
].isreg
= 1;
6560 inst
.operands
[i
++].present
= 1;
6562 if (skip_past_comma (&ptr
) == FAIL
)
6565 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6568 inst
.operands
[i
].reg
= val
;
6569 inst
.operands
[i
].isreg
= 1;
6570 inst
.operands
[i
].present
= 1;
6573 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6574 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6575 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6576 Case 10: VMOV.F32 <Sd>, #<imm>
6577 Case 11: VMOV.F64 <Dd>, #<imm> */
6578 inst
.operands
[i
].immisfloat
= 1;
6579 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6581 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6582 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6586 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6590 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6593 inst
.operands
[i
].reg
= val
;
6594 inst
.operands
[i
].isreg
= 1;
6595 inst
.operands
[i
++].present
= 1;
6597 if (skip_past_comma (&ptr
) == FAIL
)
6600 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6602 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6603 inst
.operands
[i
].reg
= val
;
6604 inst
.operands
[i
].isscalar
= 1;
6605 inst
.operands
[i
].present
= 1;
6606 inst
.operands
[i
].vectype
= optype
;
6608 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6610 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6611 inst
.operands
[i
].reg
= val
;
6612 inst
.operands
[i
].isreg
= 1;
6613 inst
.operands
[i
++].present
= 1;
6615 if (skip_past_comma (&ptr
) == FAIL
)
6618 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6621 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6625 inst
.operands
[i
].reg
= val
;
6626 inst
.operands
[i
].isreg
= 1;
6627 inst
.operands
[i
].isvec
= 1;
6628 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6629 inst
.operands
[i
].vectype
= optype
;
6630 inst
.operands
[i
].present
= 1;
6632 if (rtype
== REG_TYPE_VFS
)
6636 if (skip_past_comma (&ptr
) == FAIL
)
6638 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6641 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6644 inst
.operands
[i
].reg
= val
;
6645 inst
.operands
[i
].isreg
= 1;
6646 inst
.operands
[i
].isvec
= 1;
6647 inst
.operands
[i
].issingle
= 1;
6648 inst
.operands
[i
].vectype
= optype
;
6649 inst
.operands
[i
].present
= 1;
6652 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6656 inst
.operands
[i
].reg
= val
;
6657 inst
.operands
[i
].isreg
= 1;
6658 inst
.operands
[i
].isvec
= 1;
6659 inst
.operands
[i
].issingle
= 1;
6660 inst
.operands
[i
].vectype
= optype
;
6661 inst
.operands
[i
].present
= 1;
6666 first_error (_("parse error"));
6670 /* Successfully parsed the operands. Update args. */
6676 first_error (_("expected comma"));
6680 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
6684 /* Use this macro when the operand constraints are different
6685 for ARM and THUMB (e.g. ldrd). */
/* Packs the ARM matcher code into the low 16 bits and the Thumb code
   into the high 16 bits; parse_operands unpacks the pair by testing
   op_parse_code >= 1<<16 and shifting.  */
6686 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6687 ((arm_operand) | ((thumb_operand) << 16))
6689 /* Matcher codes for parse_operands. */
6690 enum operand_parse_code
6692 OP_stop
, /* end of line */
6694 OP_RR
, /* ARM register */
6695 OP_RRnpc
, /* ARM register, not r15 */
6696 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6697 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6698 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6699 optional trailing ! */
6700 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6701 OP_RCP
, /* Coprocessor number */
6702 OP_RCN
, /* Coprocessor register */
6703 OP_RF
, /* FPA register */
6704 OP_RVS
, /* VFP single precision register */
6705 OP_RVD
, /* VFP double precision register (0..15) */
6706 OP_RND
, /* Neon double precision register (0..31) */
6707 OP_RNDMQ
, /* Neon double precision (0..31) or MVE vector register. */
6708 OP_RNDMQR
, /* Neon double precision (0..31), MVE vector or ARM register.
6710 OP_RNQ
, /* Neon quad precision register */
6711 OP_RNQMQ
, /* Neon quad or MVE vector register. */
6712 OP_RVSD
, /* VFP single or double precision register */
6713 OP_RNSD
, /* Neon single or double precision register */
6714 OP_RNDQ
, /* Neon double or quad precision register */
6715 OP_RNDQMQ
, /* Neon double, quad or MVE vector register. */
6716 OP_RNSDQ
, /* Neon single, double or quad precision register */
6717 OP_RNSC
, /* Neon scalar D[X] */
6718 OP_RVC
, /* VFP control register */
6719 OP_RMF
, /* Maverick F register */
6720 OP_RMD
, /* Maverick D register */
6721 OP_RMFX
, /* Maverick FX register */
6722 OP_RMDX
, /* Maverick DX register */
6723 OP_RMAX
, /* Maverick AX register */
6724 OP_RMDS
, /* Maverick DSPSC register */
6725 OP_RIWR
, /* iWMMXt wR register */
6726 OP_RIWC
, /* iWMMXt wC register */
6727 OP_RIWG
, /* iWMMXt wCG register */
6728 OP_RXA
, /* XScale accumulator register */
6730 OP_RNSDQMQ
, /* Neon single, double or quad register or MVE vector register
6732 OP_RNSDQMQR
, /* Neon single, double or quad register, MVE vector register or
6734 OP_RMQ
, /* MVE vector register. */
6736 /* New operands for Armv8.1-M Mainline. */
6737 OP_LR
, /* ARM LR register */
6738 OP_RRe
, /* ARM register, only even numbered. */
6739 OP_RRo
, /* ARM register, only odd numbered, not r13 or r15. */
6740 OP_RRnpcsp_I32
, /* ARM register (no BadReg) or literal 1 .. 32 */
6742 OP_REGLST
, /* ARM register list */
6743 OP_CLRMLST
, /* CLRM register list */
6744 OP_VRSLST
, /* VFP single-precision register list */
6745 OP_VRDLST
, /* VFP double-precision register list */
6746 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6747 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6748 OP_NSTRLST
, /* Neon element/structure list */
6749 OP_VRSDVLST
, /* VFP single or double-precision register list and VPR */
6751 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6752 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6753 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6754 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6755 OP_RNSD_RNSC
, /* Neon S or D reg, or Neon scalar. */
6756 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6757 OP_RNSDQ_RNSC_MQ
, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
6759 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6760 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6761 OP_VMOV
, /* Neon VMOV operands. */
6762 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6763 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6764 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6765 OP_VLDR
, /* VLDR operand. */
6767 OP_I0
, /* immediate zero */
6768 OP_I7
, /* immediate value 0 .. 7 */
6769 OP_I15
, /* 0 .. 15 */
6770 OP_I16
, /* 1 .. 16 */
6771 OP_I16z
, /* 0 .. 16 */
6772 OP_I31
, /* 0 .. 31 */
6773 OP_I31w
, /* 0 .. 31, optional trailing ! */
6774 OP_I32
, /* 1 .. 32 */
6775 OP_I32z
, /* 0 .. 32 */
6776 OP_I63
, /* 0 .. 63 */
6777 OP_I63s
, /* -64 .. 63 */
6778 OP_I64
, /* 1 .. 64 */
6779 OP_I64z
, /* 0 .. 64 */
6780 OP_I255
, /* 0 .. 255 */
6782 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6783 OP_I7b
, /* 0 .. 7 */
6784 OP_I15b
, /* 0 .. 15 */
6785 OP_I31b
, /* 0 .. 31 */
6787 OP_SH
, /* shifter operand */
6788 OP_SHG
, /* shifter operand with possible group relocation */
6789 OP_ADDR
, /* Memory address expression (any mode) */
6790 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6791 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6792 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6793 OP_EXP
, /* arbitrary expression */
6794 OP_EXPi
, /* same, with optional immediate prefix */
6795 OP_EXPr
, /* same, with optional relocation suffix */
6796 OP_EXPs
, /* same, with optional non-first operand relocation suffix */
6797 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6798 OP_IROT1
, /* VCADD rotate immediate: 90, 270. */
6799 OP_IROT2
, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6801 OP_CPSF
, /* CPS flags */
6802 OP_ENDI
, /* Endianness specifier */
6803 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6804 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6805 OP_COND
, /* conditional code */
6806 OP_TB
, /* Table branch. */
6808 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6810 OP_RRnpc_I0
, /* ARM register or literal 0 */
6811 OP_RR_EXr
, /* ARM register or expression with opt. reloc stuff. */
6812 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6813 OP_RF_IF
, /* FPA register or immediate */
6814 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6815 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6817 /* Optional operands. */
6818 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6819 OP_oI31b
, /* 0 .. 31 */
6820 OP_oI32b
, /* 1 .. 32 */
6821 OP_oI32z
, /* 0 .. 32 */
6822 OP_oIffffb
, /* 0 .. 65535 */
6823 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6825 OP_oRR
, /* ARM register */
6826 OP_oLR
, /* ARM LR register */
6827 OP_oRRnpc
, /* ARM register, not the PC */
6828 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6829 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6830 OP_oRND
, /* Optional Neon double precision register */
6831 OP_oRNQ
, /* Optional Neon quad precision register */
6832 OP_oRNDQMQ
, /* Optional Neon double, quad or MVE vector register. */
6833 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6834 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6835 OP_oRNSDQMQ
, /* Optional single, double or quad register or MVE vector
6837 OP_oSHll
, /* LSL immediate */
6838 OP_oSHar
, /* ASR immediate */
6839 OP_oSHllar
, /* LSL or ASR immediate */
6840 OP_oROR
, /* ROR 0/8/16/24 */
6841 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6843 /* Some pre-defined mixed (ARM/THUMB) operands. */
6844 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6845 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6846 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6848 OP_FIRST_OPTIONAL
= OP_oI7b
6851 /* Generic instruction operand parser. This does no encoding and no
6852 semantic validation; it merely squirrels values away in the inst
6853 structure. Returns SUCCESS or FAIL depending on whether the
6854 specified grammar matched. */
6856 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6858 unsigned const int *upat
= pattern
;
6859 char *backtrack_pos
= 0;
6860 const char *backtrack_error
= 0;
6861 int i
, val
= 0, backtrack_index
= 0;
6862 enum arm_reg_type rtype
;
6863 parse_operand_result result
;
6864 unsigned int op_parse_code
;
6865 bfd_boolean partial_match
;
6867 #define po_char_or_fail(chr) \
6870 if (skip_past_char (&str, chr) == FAIL) \
6875 #define po_reg_or_fail(regtype) \
6878 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6879 & inst.operands[i].vectype); \
6882 first_error (_(reg_expected_msgs[regtype])); \
6885 inst.operands[i].reg = val; \
6886 inst.operands[i].isreg = 1; \
6887 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6888 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6889 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6890 || rtype == REG_TYPE_VFD \
6891 || rtype == REG_TYPE_NQ); \
6895 #define po_reg_or_goto(regtype, label) \
6898 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6899 & inst.operands[i].vectype); \
6903 inst.operands[i].reg = val; \
6904 inst.operands[i].isreg = 1; \
6905 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6906 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6907 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6908 || rtype == REG_TYPE_VFD \
6909 || rtype == REG_TYPE_NQ); \
6913 #define po_imm_or_fail(min, max, popt) \
6916 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6918 inst.operands[i].imm = val; \
6922 #define po_scalar_or_goto(elsz, label) \
6925 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6928 inst.operands[i].reg = val; \
6929 inst.operands[i].isscalar = 1; \
6933 #define po_misc_or_fail(expr) \
6941 #define po_misc_or_fail_no_backtrack(expr) \
6945 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6946 backtrack_pos = 0; \
6947 if (result != PARSE_OPERAND_SUCCESS) \
6952 #define po_barrier_or_imm(str) \
6955 val = parse_barrier (&str); \
6956 if (val == FAIL && ! ISALPHA (*str)) \
6959 /* ISB can only take SY as an option. */ \
6960 || ((inst.instruction & 0xf0) == 0x60 \
6963 inst.error = _("invalid barrier type"); \
6964 backtrack_pos = 0; \
6970 skip_whitespace (str
);
6972 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6974 op_parse_code
= upat
[i
];
6975 if (op_parse_code
>= 1<<16)
6976 op_parse_code
= thumb
? (op_parse_code
>> 16)
6977 : (op_parse_code
& ((1<<16)-1));
6979 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6981 /* Remember where we are in case we need to backtrack. */
6982 gas_assert (!backtrack_pos
);
6983 backtrack_pos
= str
;
6984 backtrack_error
= inst
.error
;
6985 backtrack_index
= i
;
6988 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6989 po_char_or_fail (',');
6991 switch (op_parse_code
)
7003 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
7004 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
7005 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
7006 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
7007 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
7008 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
7011 po_reg_or_goto (REG_TYPE_RN
, try_rndmq
);
7015 po_reg_or_goto (REG_TYPE_MQ
, try_rnd
);
7018 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
7020 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
7022 /* Also accept generic coprocessor regs for unknown registers. */
7024 po_reg_or_fail (REG_TYPE_CN
);
7026 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
7027 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
7028 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
7029 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
7030 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
7031 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
7032 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
7033 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
7034 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
7035 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
7038 po_reg_or_goto (REG_TYPE_MQ
, try_nq
);
7041 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
7042 case OP_RNSD
: po_reg_or_fail (REG_TYPE_NSD
); break;
7045 po_reg_or_goto (REG_TYPE_MQ
, try_rndq
);
7049 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
7050 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
7052 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
7054 po_reg_or_goto (REG_TYPE_RN
, try_mq
);
7059 po_reg_or_goto (REG_TYPE_MQ
, try_nsdq2
);
7062 po_reg_or_fail (REG_TYPE_NSDQ
);
7066 po_reg_or_fail (REG_TYPE_MQ
);
7068 /* Neon scalar. Using an element size of 8 means that some invalid
7069 scalars are accepted here, so deal with those in later code. */
7070 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
7074 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
7077 po_imm_or_fail (0, 0, TRUE
);
7082 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
7087 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
7090 if (parse_ifimm_zero (&str
))
7091 inst
.operands
[i
].imm
= 0;
7095 = _("only floating point zero is allowed as immediate value");
7103 po_scalar_or_goto (8, try_rr
);
7106 po_reg_or_fail (REG_TYPE_RN
);
7110 case OP_RNSDQ_RNSC_MQ
:
7111 po_reg_or_goto (REG_TYPE_MQ
, try_rnsdq_rnsc
);
7116 po_scalar_or_goto (8, try_nsdq
);
7119 po_reg_or_fail (REG_TYPE_NSDQ
);
7125 po_scalar_or_goto (8, try_s_scalar
);
7128 po_scalar_or_goto (4, try_nsd
);
7131 po_reg_or_fail (REG_TYPE_NSD
);
7137 po_scalar_or_goto (8, try_ndq
);
7140 po_reg_or_fail (REG_TYPE_NDQ
);
7146 po_scalar_or_goto (8, try_vfd
);
7149 po_reg_or_fail (REG_TYPE_VFD
);
7154 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7155 not careful then bad things might happen. */
7156 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
7161 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
7164 /* There's a possibility of getting a 64-bit immediate here, so
7165 we need special handling. */
7166 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
7169 inst
.error
= _("immediate value is out of range");
7177 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
7180 po_imm_or_fail (0, 63, TRUE
);
7185 po_char_or_fail ('[');
7186 po_reg_or_fail (REG_TYPE_RN
);
7187 po_char_or_fail (']');
7193 po_reg_or_fail (REG_TYPE_RN
);
7194 if (skip_past_char (&str
, '!') == SUCCESS
)
7195 inst
.operands
[i
].writeback
= 1;
7199 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
7200 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
7201 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
7202 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
7203 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
7204 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
7205 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
7206 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
7207 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
7208 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
7209 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
7210 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
7212 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
7214 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
7215 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
7217 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
7218 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
7219 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
7220 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
7222 /* Immediate variants */
7224 po_char_or_fail ('{');
7225 po_imm_or_fail (0, 255, TRUE
);
7226 po_char_or_fail ('}');
7230 /* The expression parser chokes on a trailing !, so we have
7231 to find it first and zap it. */
7234 while (*s
&& *s
!= ',')
7239 inst
.operands
[i
].writeback
= 1;
7241 po_imm_or_fail (0, 31, TRUE
);
7249 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7254 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7259 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7261 if (inst
.relocs
[0].exp
.X_op
== O_symbol
)
7263 val
= parse_reloc (&str
);
7266 inst
.error
= _("unrecognized relocation suffix");
7269 else if (val
!= BFD_RELOC_UNUSED
)
7271 inst
.operands
[i
].imm
= val
;
7272 inst
.operands
[i
].hasreloc
= 1;
7278 po_misc_or_fail (my_get_expression (&inst
.relocs
[i
].exp
, &str
,
7280 if (inst
.relocs
[i
].exp
.X_op
== O_symbol
)
7282 inst
.operands
[i
].hasreloc
= 1;
7284 else if (inst
.relocs
[i
].exp
.X_op
== O_constant
)
7286 inst
.operands
[i
].imm
= inst
.relocs
[i
].exp
.X_add_number
;
7287 inst
.operands
[i
].hasreloc
= 0;
7291 /* Operand for MOVW or MOVT. */
7293 po_misc_or_fail (parse_half (&str
));
7296 /* Register or expression. */
7297 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
7298 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
7300 /* Register or immediate. */
7301 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
7302 I0
: po_imm_or_fail (0, 0, FALSE
); break;
7304 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
7306 if (!is_immediate_prefix (*str
))
7309 val
= parse_fpa_immediate (&str
);
7312 /* FPA immediates are encoded as registers 8-15.
7313 parse_fpa_immediate has already applied the offset. */
7314 inst
.operands
[i
].reg
= val
;
7315 inst
.operands
[i
].isreg
= 1;
7318 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
7319 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
7321 /* Two kinds of register. */
7324 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7326 || (rege
->type
!= REG_TYPE_MMXWR
7327 && rege
->type
!= REG_TYPE_MMXWC
7328 && rege
->type
!= REG_TYPE_MMXWCG
))
7330 inst
.error
= _("iWMMXt data or control register expected");
7333 inst
.operands
[i
].reg
= rege
->number
;
7334 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7340 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7342 || (rege
->type
!= REG_TYPE_MMXWC
7343 && rege
->type
!= REG_TYPE_MMXWCG
))
7345 inst
.error
= _("iWMMXt control register expected");
7348 inst
.operands
[i
].reg
= rege
->number
;
7349 inst
.operands
[i
].isreg
= 1;
7354 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7355 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7356 case OP_oROR
: val
= parse_ror (&str
); break;
7357 case OP_COND
: val
= parse_cond (&str
); break;
7358 case OP_oBARRIER_I15
:
7359 po_barrier_or_imm (str
); break;
7361 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7367 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7368 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7370 inst
.error
= _("Banked registers are not available with this "
7376 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7380 po_reg_or_goto (REG_TYPE_VFSD
, try_sysreg
);
7383 val
= parse_sys_vldr_vstr (&str
);
7387 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7390 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7392 if (strncasecmp (str
, "APSR_", 5) == 0)
7399 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7400 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7401 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7402 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7403 default: found
= 16;
7407 inst
.operands
[i
].isvec
= 1;
7408 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7409 inst
.operands
[i
].reg
= REG_PC
;
7416 po_misc_or_fail (parse_tb (&str
));
7419 /* Register lists. */
7421 val
= parse_reg_list (&str
, REGLIST_RN
);
7424 inst
.operands
[i
].writeback
= 1;
7430 val
= parse_reg_list (&str
, REGLIST_CLRM
);
7434 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
,
7439 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
,
7444 /* Allow Q registers too. */
7445 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7446 REGLIST_NEON_D
, &partial_match
);
7450 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7451 REGLIST_VFP_S
, &partial_match
);
7452 inst
.operands
[i
].issingle
= 1;
7457 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7458 REGLIST_VFP_D_VPR
, &partial_match
);
7459 if (val
== FAIL
&& !partial_match
)
7462 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7463 REGLIST_VFP_S_VPR
, &partial_match
);
7464 inst
.operands
[i
].issingle
= 1;
7469 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7470 REGLIST_NEON_D
, &partial_match
);
7474 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7475 &inst
.operands
[i
].vectype
);
7478 /* Addressing modes */
7480 po_misc_or_fail (parse_address (&str
, i
));
7484 po_misc_or_fail_no_backtrack (
7485 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7489 po_misc_or_fail_no_backtrack (
7490 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7494 po_misc_or_fail_no_backtrack (
7495 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7499 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7503 po_misc_or_fail_no_backtrack (
7504 parse_shifter_operand_group_reloc (&str
, i
));
7508 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7512 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7516 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7520 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7523 /* Various value-based sanity checks and shared operations. We
7524 do not signal immediate failures for the register constraints;
7525 this allows a syntax error to take precedence. */
7526 switch (op_parse_code
)
7534 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7535 inst
.error
= BAD_PC
;
7540 if (inst
.operands
[i
].isreg
)
7542 if (inst
.operands
[i
].reg
== REG_PC
)
7543 inst
.error
= BAD_PC
;
7544 else if (inst
.operands
[i
].reg
== REG_SP
7545 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7546 relaxed since ARMv8-A. */
7547 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
7550 inst
.error
= BAD_SP
;
7556 if (inst
.operands
[i
].isreg
7557 && inst
.operands
[i
].reg
== REG_PC
7558 && (inst
.operands
[i
].writeback
|| thumb
))
7559 inst
.error
= BAD_PC
;
7563 if (inst
.operands
[i
].isreg
)
7572 case OP_oBARRIER_I15
:
7583 inst
.operands
[i
].imm
= val
;
7588 if (inst
.operands
[i
].reg
!= REG_LR
)
7589 inst
.error
= _("operand must be LR register");
7593 if (inst
.operands
[i
].isreg
7594 && (inst
.operands
[i
].reg
& 0x00000001) != 0)
7595 inst
.error
= BAD_ODD
;
7599 if (inst
.operands
[i
].isreg
)
7601 if ((inst
.operands
[i
].reg
& 0x00000001) != 1)
7602 inst
.error
= BAD_EVEN
;
7603 else if (inst
.operands
[i
].reg
== REG_SP
)
7604 as_tsktsk (MVE_BAD_SP
);
7605 else if (inst
.operands
[i
].reg
== REG_PC
)
7606 inst
.error
= BAD_PC
;
7614 /* If we get here, this operand was successfully parsed. */
7615 inst
.operands
[i
].present
= 1;
7619 inst
.error
= BAD_ARGS
;
7624 /* The parse routine should already have set inst.error, but set a
7625 default here just in case. */
7627 inst
.error
= BAD_SYNTAX
;
7631 /* Do not backtrack over a trailing optional argument that
7632 absorbed some text. We will only fail again, with the
7633 'garbage following instruction' error message, which is
7634 probably less helpful than the current one. */
7635 if (backtrack_index
== i
&& backtrack_pos
!= str
7636 && upat
[i
+1] == OP_stop
)
7639 inst
.error
= BAD_SYNTAX
;
7643 /* Try again, skipping the optional argument at backtrack_pos. */
7644 str
= backtrack_pos
;
7645 inst
.error
= backtrack_error
;
7646 inst
.operands
[backtrack_index
].present
= 0;
7647 i
= backtrack_index
;
7651 /* Check that we have parsed all the arguments. */
7652 if (*str
!= '\0' && !inst
.error
)
7653 inst
.error
= _("garbage following instruction");
7655 return inst
.error
? FAIL
: SUCCESS
;
7658 #undef po_char_or_fail
7659 #undef po_reg_or_fail
7660 #undef po_reg_or_goto
7661 #undef po_imm_or_fail
7662 #undef po_scalar_or_fail
7663 #undef po_barrier_or_imm
7665 /* Shorthand macro for instruction encoding functions issuing errors. */
7666 #define constraint(expr, err) \
7677 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7678 instructions are unpredictable if these registers are used. This
7679 is the BadReg predicate in ARM's Thumb-2 documentation.
7681 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7682 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7683 #define reject_bad_reg(reg) \
7685 if (reg == REG_PC) \
7687 inst.error = BAD_PC; \
7690 else if (reg == REG_SP \
7691 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7693 inst.error = BAD_SP; \
7698 /* If REG is R13 (the stack pointer), warn that its use is
7700 #define warn_deprecated_sp(reg) \
7702 if (warn_on_deprecated && reg == REG_SP) \
7703 as_tsktsk (_("use of r13 is deprecated")); \
7706 /* Functions for operand encoding. ARM, then Thumb. */
/* Rotate the 32-bit value V left by N bits (N in [0,31]).  Arguments
   are fully parenthesized so that compound expressions such as
   rotate_left (x, a + b) expand correctly; previously "32 - n" bound
   wrongly for such arguments.  Note V and N are evaluated more than
   once, so side-effecting arguments must not be passed.  */
#define rotate_left(v, n) (((v) << ((n) & 31)) | ((v) >> ((32 - (n)) & 31)))
7710 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7712 The only binary encoding difference is the Coprocessor number. Coprocessor
7713 9 is used for half-precision calculations or conversions. The format of the
7714 instruction is the same as the equivalent Coprocessor 10 instruction that
7715 exists for Single-Precision operation. */
7718 do_scalar_fp16_v82_encode (void)
7720 if (inst
.cond
< COND_ALWAYS
)
7721 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7722 " the behaviour is UNPREDICTABLE"));
7723 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7726 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7727 mark_feature_used (&arm_ext_fp16
);
7730 /* If VAL can be encoded in the immediate field of an ARM instruction,
7731 return the encoded form. Otherwise, return FAIL. */
7734 encode_arm_immediate (unsigned int val
)
7741 for (i
= 2; i
< 32; i
+= 2)
7742 if ((a
= rotate_left (val
, i
)) <= 0xff)
7743 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7748 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7749 return the encoded form. Otherwise, return FAIL. */
7751 encode_thumb32_immediate (unsigned int val
)
7758 for (i
= 1; i
<= 24; i
++)
7761 if ((val
& ~(0xff << i
)) == 0)
7762 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7766 if (val
== ((a
<< 16) | a
))
7768 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7772 if (val
== ((a
<< 16) | a
))
7773 return 0x200 | (a
>> 8);
7777 /* Encode a VFP SP or DP register number into inst.instruction. */
7780 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7782 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7785 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7788 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7791 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7796 first_error (_("D register out of range for selected VFP version"));
7804 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7808 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7812 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7816 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7820 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7824 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7832 /* Encode a <shift> in an ARM-format instruction. The immediate,
7833 if any, is handled by md_apply_fix. */
7835 encode_arm_shift (int i
)
7837 /* register-shifted register. */
7838 if (inst
.operands
[i
].immisreg
)
7841 for (op_index
= 0; op_index
<= i
; ++op_index
)
7843 /* Check the operand only when it's presented. In pre-UAL syntax,
7844 if the destination register is the same as the first operand, two
7845 register form of the instruction can be used. */
7846 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
7847 && inst
.operands
[op_index
].reg
== REG_PC
)
7848 as_warn (UNPRED_REG ("r15"));
7851 if (inst
.operands
[i
].imm
== REG_PC
)
7852 as_warn (UNPRED_REG ("r15"));
7855 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7856 inst
.instruction
|= SHIFT_ROR
<< 5;
7859 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7860 if (inst
.operands
[i
].immisreg
)
7862 inst
.instruction
|= SHIFT_BY_REG
;
7863 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7866 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7871 encode_arm_shifter_operand (int i
)
7873 if (inst
.operands
[i
].isreg
)
7875 inst
.instruction
|= inst
.operands
[i
].reg
;
7876 encode_arm_shift (i
);
7880 inst
.instruction
|= INST_IMMEDIATE
;
7881 if (inst
.relocs
[0].type
!= BFD_RELOC_ARM_IMMEDIATE
)
7882 inst
.instruction
|= inst
.operands
[i
].imm
;
7886 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7888 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7891 Generate an error if the operand is not a register. */
7892 constraint (!inst
.operands
[i
].isreg
,
7893 _("Instruction does not support =N addresses"));
7895 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7897 if (inst
.operands
[i
].preind
)
7901 inst
.error
= _("instruction does not accept preindexed addressing");
7904 inst
.instruction
|= PRE_INDEX
;
7905 if (inst
.operands
[i
].writeback
)
7906 inst
.instruction
|= WRITE_BACK
;
7909 else if (inst
.operands
[i
].postind
)
7911 gas_assert (inst
.operands
[i
].writeback
);
7913 inst
.instruction
|= WRITE_BACK
;
7915 else /* unindexed - only for coprocessor */
7917 inst
.error
= _("instruction does not accept unindexed addressing");
7921 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7922 && (((inst
.instruction
& 0x000f0000) >> 16)
7923 == ((inst
.instruction
& 0x0000f000) >> 12)))
7924 as_warn ((inst
.instruction
& LOAD_BIT
)
7925 ? _("destination register same as write-back base")
7926 : _("source register same as write-back base"));
7929 /* inst.operands[i] was set up by parse_address. Encode it into an
7930 ARM-format mode 2 load or store instruction. If is_t is true,
7931 reject forms that cannot be used with a T instruction (i.e. not
7934 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7936 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7938 encode_arm_addr_mode_common (i
, is_t
);
7940 if (inst
.operands
[i
].immisreg
)
7942 constraint ((inst
.operands
[i
].imm
== REG_PC
7943 || (is_pc
&& inst
.operands
[i
].writeback
)),
7945 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7946 inst
.instruction
|= inst
.operands
[i
].imm
;
7947 if (!inst
.operands
[i
].negative
)
7948 inst
.instruction
|= INDEX_UP
;
7949 if (inst
.operands
[i
].shifted
)
7951 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7952 inst
.instruction
|= SHIFT_ROR
<< 5;
7955 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7956 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7960 else /* immediate offset in inst.relocs[0] */
7962 if (is_pc
&& !inst
.relocs
[0].pc_rel
)
7964 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7966 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7967 cannot use PC in addressing.
7968 PC cannot be used in writeback addressing, either. */
7969 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7972 /* Use of PC in str is deprecated for ARMv7. */
7973 if (warn_on_deprecated
7975 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7976 as_tsktsk (_("use of PC in this instruction is deprecated"));
7979 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
7981 /* Prefer + for zero encoded value. */
7982 if (!inst
.operands
[i
].negative
)
7983 inst
.instruction
|= INDEX_UP
;
7984 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM
;
7989 /* inst.operands[i] was set up by parse_address. Encode it into an
7990 ARM-format mode 3 load or store instruction. Reject forms that
7991 cannot be used with such instructions. If is_t is true, reject
7992 forms that cannot be used with a T instruction (i.e. not
7995 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7997 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7999 inst
.error
= _("instruction does not accept scaled register index");
8003 encode_arm_addr_mode_common (i
, is_t
);
8005 if (inst
.operands
[i
].immisreg
)
8007 constraint ((inst
.operands
[i
].imm
== REG_PC
8008 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
8010 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
8012 inst
.instruction
|= inst
.operands
[i
].imm
;
8013 if (!inst
.operands
[i
].negative
)
8014 inst
.instruction
|= INDEX_UP
;
8016 else /* immediate offset in inst.relocs[0] */
8018 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.relocs
[0].pc_rel
8019 && inst
.operands
[i
].writeback
),
8021 inst
.instruction
|= HWOFFSET_IMM
;
8022 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
8024 /* Prefer + for zero encoded value. */
8025 if (!inst
.operands
[i
].negative
)
8026 inst
.instruction
|= INDEX_UP
;
8028 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM8
;
8033 /* Write immediate bits [7:0] to the following locations:
8035 |28/24|23 19|18 16|15 4|3 0|
8036 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8038 This function is used by VMOV/VMVN/VORR/VBIC. */
8041 neon_write_immbits (unsigned immbits
)
8043 inst
.instruction
|= immbits
& 0xf;
8044 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
8045 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
8048 /* Invert low-order SIZE bits of XHI:XLO. */
8051 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
8053 unsigned immlo
= xlo
? *xlo
: 0;
8054 unsigned immhi
= xhi
? *xhi
: 0;
8059 immlo
= (~immlo
) & 0xff;
8063 immlo
= (~immlo
) & 0xffff;
8067 immhi
= (~immhi
) & 0xffffffff;
8071 immlo
= (~immlo
) & 0xffffffff;
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D, i.e. each byte of IMM is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }
  return 1;
}
/* For immediate of above form, return 0bABCD: bit 0 of each byte of IMM
   is gathered into the corresponding low-order bit of the result.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  /* Bit 8*N of IMM becomes bit N of the result.  */
  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 0x01) << byte;

  return result;
}
/* Compress quarter-float representation to 0b...000 abcdefgh: the IEEE
   single-precision sign bit plus the low exponent bit and top fraction
   bits, as used by the Neon/VFP 8-bit float immediate encoding.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned sign = (imm >> 24) & 0x80;	/* Bit 31 -> bit 7.  */
  unsigned expfrac = (imm >> 19) & 0x7f;	/* Bits 25..19 -> bits 6..0.  */

  return sign | expfrac;
}
8114 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
8115 the instruction. *OP is passed as the initial value of the op field, and
8116 may be set to a different value depending on the constant (i.e.
8117 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
8118 MVN). If the immediate looks like a repeated pattern then also
8119 try smaller element sizes. */
8122 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
8123 unsigned *immbits
, int *op
, int size
,
8124 enum neon_el_type type
)
8126 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
8128 if (type
== NT_float
&& !float_p
)
8131 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
8133 if (size
!= 32 || *op
== 1)
8135 *immbits
= neon_qfloat_bits (immlo
);
8141 if (neon_bits_same_in_bytes (immhi
)
8142 && neon_bits_same_in_bytes (immlo
))
8146 *immbits
= (neon_squash_bits (immhi
) << 4)
8147 | neon_squash_bits (immlo
);
8158 if (immlo
== (immlo
& 0x000000ff))
8163 else if (immlo
== (immlo
& 0x0000ff00))
8165 *immbits
= immlo
>> 8;
8168 else if (immlo
== (immlo
& 0x00ff0000))
8170 *immbits
= immlo
>> 16;
8173 else if (immlo
== (immlo
& 0xff000000))
8175 *immbits
= immlo
>> 24;
8178 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
8180 *immbits
= (immlo
>> 8) & 0xff;
8183 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
8185 *immbits
= (immlo
>> 16) & 0xff;
8189 if ((immlo
& 0xffff) != (immlo
>> 16))
8196 if (immlo
== (immlo
& 0x000000ff))
8201 else if (immlo
== (immlo
& 0x0000ff00))
8203 *immbits
= immlo
>> 8;
8207 if ((immlo
& 0xff) != (immlo
>> 8))
8212 if (immlo
== (immlo
& 0x000000ff))
8214 /* Don't allow MVN with 8-bit immediate. */
8224 #if defined BFD_HOST_64_BIT
8225 /* Returns TRUE if double precision value V may be cast
8226 to single precision without loss of accuracy. */
8229 is_double_a_single (bfd_int64_t v
)
8231 int exp
= (int)((v
>> 52) & 0x7FF);
8232 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8234 return (exp
== 0 || exp
== 0x7FF
8235 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
8236 && (mantissa
& 0x1FFFFFFFl
) == 0;
8239 /* Returns a double precision value casted to single precision
8240 (ignoring the least significant bits in exponent and mantissa). */
8243 double_to_single (bfd_int64_t v
)
8245 int sign
= (int) ((v
>> 63) & 1l);
8246 int exp
= (int) ((v
>> 52) & 0x7FF);
8247 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8253 exp
= exp
- 1023 + 127;
8262 /* No denormalized numbers. */
8268 return (sign
<< 31) | (exp
<< 23) | mantissa
;
8270 #endif /* BFD_HOST_64_BIT */
8279 static void do_vfp_nsyn_opcode (const char *);
8281 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8282 Determine whether it can be performed with a move instruction; if
8283 it can, convert inst.instruction to that move instruction and
8284 return TRUE; if it can't, convert inst.instruction to a literal-pool
8285 load and return FALSE. If this is not a valid thing to do in the
8286 current context, set inst.error and return TRUE.
8288 inst.operands[i] describes the destination register. */
8291 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
8294 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
8295 bfd_boolean arm_p
= (t
== CONST_ARM
);
8298 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
8302 if ((inst
.instruction
& tbit
) == 0)
8304 inst
.error
= _("invalid pseudo operation");
8308 if (inst
.relocs
[0].exp
.X_op
!= O_constant
8309 && inst
.relocs
[0].exp
.X_op
!= O_symbol
8310 && inst
.relocs
[0].exp
.X_op
!= O_big
)
8312 inst
.error
= _("constant expression expected");
8316 if (inst
.relocs
[0].exp
.X_op
== O_constant
8317 || inst
.relocs
[0].exp
.X_op
== O_big
)
8319 #if defined BFD_HOST_64_BIT
8324 if (inst
.relocs
[0].exp
.X_op
== O_big
)
8326 LITTLENUM_TYPE w
[X_PRECISION
];
8329 if (inst
.relocs
[0].exp
.X_add_number
== -1)
8331 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
8333 /* FIXME: Should we check words w[2..5] ? */
8338 #if defined BFD_HOST_64_BIT
8340 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
8341 << LITTLENUM_NUMBER_OF_BITS
)
8342 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
8343 << LITTLENUM_NUMBER_OF_BITS
)
8344 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
8345 << LITTLENUM_NUMBER_OF_BITS
)
8346 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
8348 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
8349 | (l
[0] & LITTLENUM_MASK
);
8353 v
= inst
.relocs
[0].exp
.X_add_number
;
8355 if (!inst
.operands
[i
].issingle
)
8359 /* LDR should not use lead in a flag-setting instruction being
8360 chosen so we do not check whether movs can be used. */
8362 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
8363 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8364 && inst
.operands
[i
].reg
!= 13
8365 && inst
.operands
[i
].reg
!= 15)
8367 /* Check if on thumb2 it can be done with a mov.w, mvn or
8368 movw instruction. */
8369 unsigned int newimm
;
8370 bfd_boolean isNegated
;
8372 newimm
= encode_thumb32_immediate (v
);
8373 if (newimm
!= (unsigned int) FAIL
)
8377 newimm
= encode_thumb32_immediate (~v
);
8378 if (newimm
!= (unsigned int) FAIL
)
8382 /* The number can be loaded with a mov.w or mvn
8384 if (newimm
!= (unsigned int) FAIL
8385 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8387 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8388 | (inst
.operands
[i
].reg
<< 8));
8389 /* Change to MOVN. */
8390 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8391 inst
.instruction
|= (newimm
& 0x800) << 15;
8392 inst
.instruction
|= (newimm
& 0x700) << 4;
8393 inst
.instruction
|= (newimm
& 0x0ff);
8396 /* The number can be loaded with a movw instruction. */
8397 else if ((v
& ~0xFFFF) == 0
8398 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8400 int imm
= v
& 0xFFFF;
8402 inst
.instruction
= 0xf2400000; /* MOVW. */
8403 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8404 inst
.instruction
|= (imm
& 0xf000) << 4;
8405 inst
.instruction
|= (imm
& 0x0800) << 15;
8406 inst
.instruction
|= (imm
& 0x0700) << 4;
8407 inst
.instruction
|= (imm
& 0x00ff);
8414 int value
= encode_arm_immediate (v
);
8418 /* This can be done with a mov instruction. */
8419 inst
.instruction
&= LITERAL_MASK
;
8420 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8421 inst
.instruction
|= value
& 0xfff;
8425 value
= encode_arm_immediate (~ v
);
8428 /* This can be done with a mvn instruction. */
8429 inst
.instruction
&= LITERAL_MASK
;
8430 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8431 inst
.instruction
|= value
& 0xfff;
8435 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8438 unsigned immbits
= 0;
8439 unsigned immlo
= inst
.operands
[1].imm
;
8440 unsigned immhi
= inst
.operands
[1].regisimm
8441 ? inst
.operands
[1].reg
8442 : inst
.relocs
[0].exp
.X_unsigned
8444 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8445 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8446 &op
, 64, NT_invtype
);
8450 neon_invert_size (&immlo
, &immhi
, 64);
8452 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8453 &op
, 64, NT_invtype
);
8458 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8464 /* Fill other bits in vmov encoding for both thumb and arm. */
8466 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8468 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8469 neon_write_immbits (immbits
);
8477 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8478 if (inst
.operands
[i
].issingle
8479 && is_quarter_float (inst
.operands
[1].imm
)
8480 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8482 inst
.operands
[1].imm
=
8483 neon_qfloat_bits (v
);
8484 do_vfp_nsyn_opcode ("fconsts");
8488 /* If our host does not support a 64-bit type then we cannot perform
8489 the following optimization. This mean that there will be a
8490 discrepancy between the output produced by an assembler built for
8491 a 32-bit-only host and the output produced from a 64-bit host, but
8492 this cannot be helped. */
8493 #if defined BFD_HOST_64_BIT
8494 else if (!inst
.operands
[1].issingle
8495 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8497 if (is_double_a_single (v
)
8498 && is_quarter_float (double_to_single (v
)))
8500 inst
.operands
[1].imm
=
8501 neon_qfloat_bits (double_to_single (v
));
8502 do_vfp_nsyn_opcode ("fconstd");
8510 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8511 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8514 inst
.operands
[1].reg
= REG_PC
;
8515 inst
.operands
[1].isreg
= 1;
8516 inst
.operands
[1].preind
= 1;
8517 inst
.relocs
[0].pc_rel
= 1;
8518 inst
.relocs
[0].type
= (thumb_p
8519 ? BFD_RELOC_ARM_THUMB_OFFSET
8521 ? BFD_RELOC_ARM_HWLITERAL
8522 : BFD_RELOC_ARM_LITERAL
));
8526 /* inst.operands[i] was set up by parse_address. Encode it into an
8527 ARM-format instruction. Reject all forms which cannot be encoded
8528 into a coprocessor load/store instruction. If wb_ok is false,
8529 reject use of writeback; if unind_ok is false, reject use of
8530 unindexed addressing. If reloc_override is not 0, use it instead
8531 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8532 (in which case it is preserved). */
8535 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8537 if (!inst
.operands
[i
].isreg
)
8540 if (! inst
.operands
[0].isvec
)
8542 inst
.error
= _("invalid co-processor operand");
8545 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8549 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8551 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8553 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8555 gas_assert (!inst
.operands
[i
].writeback
);
8558 inst
.error
= _("instruction does not support unindexed addressing");
8561 inst
.instruction
|= inst
.operands
[i
].imm
;
8562 inst
.instruction
|= INDEX_UP
;
8566 if (inst
.operands
[i
].preind
)
8567 inst
.instruction
|= PRE_INDEX
;
8569 if (inst
.operands
[i
].writeback
)
8571 if (inst
.operands
[i
].reg
== REG_PC
)
8573 inst
.error
= _("pc may not be used with write-back");
8578 inst
.error
= _("instruction does not support writeback");
8581 inst
.instruction
|= WRITE_BACK
;
8585 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) reloc_override
;
8586 else if ((inst
.relocs
[0].type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8587 || inst
.relocs
[0].type
> BFD_RELOC_ARM_LDC_SB_G2
)
8588 && inst
.relocs
[0].type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8591 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8593 inst
.relocs
[0].type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8596 /* Prefer + for zero encoded value. */
8597 if (!inst
.operands
[i
].negative
)
8598 inst
.instruction
|= INDEX_UP
;
8603 /* Functions for instruction encoding, sorted by sub-architecture.
8604 First some generics; their names are taken from the conventional
8605 bit positions for register arguments in ARM format instructions. */
8615 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8621 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8627 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8628 inst
.instruction
|= inst
.operands
[1].reg
;
8634 inst
.instruction
|= inst
.operands
[0].reg
;
8635 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8641 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8642 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8648 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8649 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8655 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8656 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8660 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8662 if (ARM_CPU_IS_ANY (cpu_variant
))
8664 as_tsktsk ("%s", msg
);
8667 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8679 unsigned Rn
= inst
.operands
[2].reg
;
8680 /* Enforce restrictions on SWP instruction. */
8681 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8683 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8684 _("Rn must not overlap other operands"));
8686 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8688 if (!check_obsolete (&arm_ext_v8
,
8689 _("swp{b} use is obsoleted for ARMv8 and later"))
8690 && warn_on_deprecated
8691 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8692 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8695 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8696 inst
.instruction
|= inst
.operands
[1].reg
;
8697 inst
.instruction
|= Rn
<< 16;
8703 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8704 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8705 inst
.instruction
|= inst
.operands
[2].reg
;
8711 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8712 constraint (((inst
.relocs
[0].exp
.X_op
!= O_constant
8713 && inst
.relocs
[0].exp
.X_op
!= O_illegal
)
8714 || inst
.relocs
[0].exp
.X_add_number
!= 0),
8716 inst
.instruction
|= inst
.operands
[0].reg
;
8717 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8718 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8724 inst
.instruction
|= inst
.operands
[0].imm
;
8730 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8731 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8734 /* ARM instructions, in alphabetical order by function name (except
8735 that wrapper functions appear immediately after the function they
8738 /* This is a pseudo-op of the form "adr rd, label" to be converted
8739 into a relative address of the form "add rd, pc, #label-.-8". */
8744 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8746 /* Frag hacking will turn this into a sub instruction if the offset turns
8747 out to be negative. */
8748 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
8749 inst
.relocs
[0].pc_rel
= 1;
8750 inst
.relocs
[0].exp
.X_add_number
-= 8;
8752 if (support_interwork
8753 && inst
.relocs
[0].exp
.X_op
== O_symbol
8754 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8755 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8756 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8757 inst
.relocs
[0].exp
.X_add_number
|= 1;
8760 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8761 into a relative address of the form:
8762 add rd, pc, #low(label-.-8)"
8763 add rd, rd, #high(label-.-8)" */
8768 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8770 /* Frag hacking will turn this into a sub instruction if the offset turns
8771 out to be negative. */
8772 inst
.relocs
[0].type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8773 inst
.relocs
[0].pc_rel
= 1;
8774 inst
.size
= INSN_SIZE
* 2;
8775 inst
.relocs
[0].exp
.X_add_number
-= 8;
8777 if (support_interwork
8778 && inst
.relocs
[0].exp
.X_op
== O_symbol
8779 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8780 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8781 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8782 inst
.relocs
[0].exp
.X_add_number
|= 1;
8788 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8789 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8791 if (!inst
.operands
[1].present
)
8792 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8793 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8794 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8795 encode_arm_shifter_operand (2);
8801 if (inst
.operands
[0].present
)
8802 inst
.instruction
|= inst
.operands
[0].imm
;
8804 inst
.instruction
|= 0xf;
8810 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8811 constraint (msb
> 32, _("bit-field extends past end of register"));
8812 /* The instruction encoding stores the LSB and MSB,
8813 not the LSB and width. */
8814 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8815 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8816 inst
.instruction
|= (msb
- 1) << 16;
8824 /* #0 in second position is alternative syntax for bfc, which is
8825 the same instruction but with REG_PC in the Rm field. */
8826 if (!inst
.operands
[1].isreg
)
8827 inst
.operands
[1].reg
= REG_PC
;
8829 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8830 constraint (msb
> 32, _("bit-field extends past end of register"));
8831 /* The instruction encoding stores the LSB and MSB,
8832 not the LSB and width. */
8833 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8834 inst
.instruction
|= inst
.operands
[1].reg
;
8835 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8836 inst
.instruction
|= (msb
- 1) << 16;
8842 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8843 _("bit-field extends past end of register"));
8844 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8845 inst
.instruction
|= inst
.operands
[1].reg
;
8846 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8847 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8850 /* ARM V5 breakpoint instruction (argument parse)
8851 BKPT <16 bit unsigned immediate>
8852 Instruction is not conditional.
8853 The bit pattern given in insns[] has the COND_ALWAYS condition,
8854 and it is an error if the caller tried to override that. */
8859 /* Top 12 of 16 bits to bits 19:8. */
8860 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8862 /* Bottom 4 of 16 bits to bits 3:0. */
8863 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8867 encode_branch (int default_reloc
)
8869 if (inst
.operands
[0].hasreloc
)
8871 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8872 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8873 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8874 inst
.relocs
[0].type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8875 ? BFD_RELOC_ARM_PLT32
8876 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8879 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) default_reloc
;
8880 inst
.relocs
[0].pc_rel
= 1;
8887 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8888 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8891 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8898 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8900 if (inst
.cond
== COND_ALWAYS
)
8901 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8903 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8907 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8910 /* ARM V5 branch-link-exchange instruction (argument parse)
8911 BLX <target_addr> ie BLX(1)
8912 BLX{<condition>} <Rm> ie BLX(2)
8913 Unfortunately, there are two different opcodes for this mnemonic.
8914 So, the insns[].value is not used, and the code here zaps values
8915 into inst.instruction.
8916 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8921 if (inst
.operands
[0].isreg
)
8923 /* Arg is a register; the opcode provided by insns[] is correct.
8924 It is not illegal to do "blx pc", just useless. */
8925 if (inst
.operands
[0].reg
== REG_PC
)
8926 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8928 inst
.instruction
|= inst
.operands
[0].reg
;
8932 /* Arg is an address; this instruction cannot be executed
8933 conditionally, and the opcode must be adjusted.
8934 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8935 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8936 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8937 inst
.instruction
= 0xfa000000;
8938 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8945 bfd_boolean want_reloc
;
8947 if (inst
.operands
[0].reg
== REG_PC
)
8948 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8950 inst
.instruction
|= inst
.operands
[0].reg
;
8951 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8952 it is for ARMv4t or earlier. */
8953 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8954 if (!ARM_FEATURE_ZERO (selected_object_arch
)
8955 && !ARM_CPU_HAS_FEATURE (selected_object_arch
, arm_ext_v5
))
8959 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8964 inst
.relocs
[0].type
= BFD_RELOC_ARM_V4BX
;
8968 /* ARM v5TEJ. Jump to Jazelle code. */
8973 if (inst
.operands
[0].reg
== REG_PC
)
8974 as_tsktsk (_("use of r15 in bxj is not really useful"));
8976 inst
.instruction
|= inst
.operands
[0].reg
;
8979 /* Co-processor data operation:
8980 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8981 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8985 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8986 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8987 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8988 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8989 inst
.instruction
|= inst
.operands
[4].reg
;
8990 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8996 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8997 encode_arm_shifter_operand (1);
9000 /* Transfer between coprocessor and ARM registers.
9001 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9006 No special properties. */
9008 struct deprecated_coproc_regs_s
9015 arm_feature_set deprecated
;
9016 arm_feature_set obsoleted
;
9017 const char *dep_msg
;
9018 const char *obs_msg
;
9021 #define DEPR_ACCESS_V8 \
9022 N_("This coprocessor register access is deprecated in ARMv8")
9024 /* Table of all deprecated coprocessor registers. */
9025 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
9027 {15, 0, 7, 10, 5, /* CP15DMB. */
9028 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9029 DEPR_ACCESS_V8
, NULL
},
9030 {15, 0, 7, 10, 4, /* CP15DSB. */
9031 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9032 DEPR_ACCESS_V8
, NULL
},
9033 {15, 0, 7, 5, 4, /* CP15ISB. */
9034 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9035 DEPR_ACCESS_V8
, NULL
},
9036 {14, 6, 1, 0, 0, /* TEEHBR. */
9037 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9038 DEPR_ACCESS_V8
, NULL
},
9039 {14, 6, 0, 0, 0, /* TEECR. */
9040 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9041 DEPR_ACCESS_V8
, NULL
},
9044 #undef DEPR_ACCESS_V8
9046 static const size_t deprecated_coproc_reg_count
=
9047 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
9055 Rd
= inst
.operands
[2].reg
;
9058 if (inst
.instruction
== 0xee000010
9059 || inst
.instruction
== 0xfe000010)
9061 reject_bad_reg (Rd
);
9062 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9064 constraint (Rd
== REG_SP
, BAD_SP
);
9069 if (inst
.instruction
== 0xe000010)
9070 constraint (Rd
== REG_PC
, BAD_PC
);
9073 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
9075 const struct deprecated_coproc_regs_s
*r
=
9076 deprecated_coproc_regs
+ i
;
9078 if (inst
.operands
[0].reg
== r
->cp
9079 && inst
.operands
[1].imm
== r
->opc1
9080 && inst
.operands
[3].reg
== r
->crn
9081 && inst
.operands
[4].reg
== r
->crm
9082 && inst
.operands
[5].imm
== r
->opc2
)
9084 if (! ARM_CPU_IS_ANY (cpu_variant
)
9085 && warn_on_deprecated
9086 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
9087 as_tsktsk ("%s", r
->dep_msg
);
9091 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9092 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
9093 inst
.instruction
|= Rd
<< 12;
9094 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9095 inst
.instruction
|= inst
.operands
[4].reg
;
9096 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
9099 /* Transfer between coprocessor register and pair of ARM registers.
9100 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9105 Two XScale instructions are special cases of these:
9107 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9108 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9110 Result unpredictable if Rd or Rn is R15. */
9117 Rd
= inst
.operands
[2].reg
;
9118 Rn
= inst
.operands
[3].reg
;
9122 reject_bad_reg (Rd
);
9123 reject_bad_reg (Rn
);
9127 constraint (Rd
== REG_PC
, BAD_PC
);
9128 constraint (Rn
== REG_PC
, BAD_PC
);
9131 /* Only check the MRRC{2} variants. */
9132 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
9134 /* If Rd == Rn, error that the operation is
9135 unpredictable (example MRRC p3,#1,r1,r1,c4). */
9136 constraint (Rd
== Rn
, BAD_OVERLAP
);
9139 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9140 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
9141 inst
.instruction
|= Rd
<< 12;
9142 inst
.instruction
|= Rn
<< 16;
9143 inst
.instruction
|= inst
.operands
[4].reg
;
9149 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
9150 if (inst
.operands
[1].present
)
9152 inst
.instruction
|= CPSI_MMOD
;
9153 inst
.instruction
|= inst
.operands
[1].imm
;
9160 inst
.instruction
|= inst
.operands
[0].imm
;
9166 unsigned Rd
, Rn
, Rm
;
9168 Rd
= inst
.operands
[0].reg
;
9169 Rn
= (inst
.operands
[1].present
9170 ? inst
.operands
[1].reg
: Rd
);
9171 Rm
= inst
.operands
[2].reg
;
9173 constraint ((Rd
== REG_PC
), BAD_PC
);
9174 constraint ((Rn
== REG_PC
), BAD_PC
);
9175 constraint ((Rm
== REG_PC
), BAD_PC
);
9177 inst
.instruction
|= Rd
<< 16;
9178 inst
.instruction
|= Rn
<< 0;
9179 inst
.instruction
|= Rm
<< 8;
9185 /* There is no IT instruction in ARM mode. We
9186 process it to do the validation as if in
9187 thumb mode, just in case the code gets
9188 assembled for thumb using the unified syntax. */
9193 set_pred_insn_type (IT_INSN
);
9194 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
9195 now_pred
.cc
= inst
.operands
[0].imm
;
9199 /* If there is only one register in the register list,
9200 then return its register number. Otherwise return -1. */
static int
only_one_reg_in_list (int range)
{
  /* ffs returns the 1-based index of the least significant set bit,
     or 0 when RANGE has no bits set.  */
  int i = ffs (range) - 1;

  /* Guard the empty-list case explicitly: without this check,
     "1 << i" with i == -1 is a negative-count shift, which is
     undefined behaviour in C.  */
  if (i < 0)
    return -1;

  /* A single-register list has exactly one bit set, and ARM core
     register numbers only go up to 15 (r0..r15).  */
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
9209 encode_ldmstm(int from_push_pop_mnem
)
9211 int base_reg
= inst
.operands
[0].reg
;
9212 int range
= inst
.operands
[1].imm
;
9215 inst
.instruction
|= base_reg
<< 16;
9216 inst
.instruction
|= range
;
9218 if (inst
.operands
[1].writeback
)
9219 inst
.instruction
|= LDM_TYPE_2_OR_3
;
9221 if (inst
.operands
[0].writeback
)
9223 inst
.instruction
|= WRITE_BACK
;
9224 /* Check for unpredictable uses of writeback. */
9225 if (inst
.instruction
& LOAD_BIT
)
9227 /* Not allowed in LDM type 2. */
9228 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
9229 && ((range
& (1 << REG_PC
)) == 0))
9230 as_warn (_("writeback of base register is UNPREDICTABLE"));
9231 /* Only allowed if base reg not in list for other types. */
9232 else if (range
& (1 << base_reg
))
9233 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
9237 /* Not allowed for type 2. */
9238 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
9239 as_warn (_("writeback of base register is UNPREDICTABLE"));
9240 /* Only allowed if base reg not in list, or first in list. */
9241 else if ((range
& (1 << base_reg
))
9242 && (range
& ((1 << base_reg
) - 1)))
9243 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
9247 /* If PUSH/POP has only one register, then use the A2 encoding. */
9248 one_reg
= only_one_reg_in_list (range
);
9249 if (from_push_pop_mnem
&& one_reg
>= 0)
9251 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
9253 if (is_push
&& one_reg
== 13 /* SP */)
9254 /* PR 22483: The A2 encoding cannot be used when
9255 pushing the stack pointer as this is UNPREDICTABLE. */
9258 inst
.instruction
&= A_COND_MASK
;
9259 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
9260 inst
.instruction
|= one_reg
<< 12;
9267 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
9270 /* ARMv5TE load-consecutive (argument parse)
9279 constraint (inst
.operands
[0].reg
% 2 != 0,
9280 _("first transfer register must be even"));
9281 constraint (inst
.operands
[1].present
9282 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9283 _("can only transfer two consecutive registers"));
9284 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9285 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
9287 if (!inst
.operands
[1].present
)
9288 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9290 /* encode_arm_addr_mode_3 will diagnose overlap between the base
9291 register and the first register written; we have to diagnose
9292 overlap between the base and the second register written here. */
9294 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
9295 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
9296 as_warn (_("base register written back, and overlaps "
9297 "second transfer register"));
9299 if (!(inst
.instruction
& V4_STR_BIT
))
9301 /* For an index-register load, the index register must not overlap the
9302 destination (even if not write-back). */
9303 if (inst
.operands
[2].immisreg
9304 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
9305 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
9306 as_warn (_("index register overlaps transfer register"));
9308 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9309 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
9315 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9316 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9317 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9318 || inst
.operands
[1].negative
9319 /* This can arise if the programmer has written
9321 or if they have mistakenly used a register name as the last
9324 It is very difficult to distinguish between these two cases
9325 because "rX" might actually be a label. ie the register
9326 name has been occluded by a symbol of the same name. So we
9327 just generate a general 'bad addressing mode' type error
9328 message and leave it up to the programmer to discover the
9329 true cause and fix their mistake. */
9330 || (inst
.operands
[1].reg
== REG_PC
),
9333 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9334 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9335 _("offset must be zero in ARM encoding"));
9337 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
9339 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9340 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9341 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9347 constraint (inst
.operands
[0].reg
% 2 != 0,
9348 _("even register required"));
9349 constraint (inst
.operands
[1].present
9350 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9351 _("can only load two consecutive registers"));
9352 /* If op 1 were present and equal to PC, this function wouldn't
9353 have been called in the first place. */
9354 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9356 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9357 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9360 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9361 which is not a multiple of four is UNPREDICTABLE. */
9363 check_ldr_r15_aligned (void)
9365 constraint (!(inst
.operands
[1].immisreg
)
9366 && (inst
.operands
[0].reg
== REG_PC
9367 && inst
.operands
[1].reg
== REG_PC
9368 && (inst
.relocs
[0].exp
.X_add_number
& 0x3)),
9369 _("ldr to register 15 must be 4-byte aligned"));
9375 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9376 if (!inst
.operands
[1].isreg
)
9377 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
9379 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
9380 check_ldr_r15_aligned ();
9386 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9388 if (inst
.operands
[1].preind
)
9390 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9391 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9392 _("this instruction requires a post-indexed address"));
9394 inst
.operands
[1].preind
= 0;
9395 inst
.operands
[1].postind
= 1;
9396 inst
.operands
[1].writeback
= 1;
9398 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9399 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
9402 /* Halfword and signed-byte load/store operations. */
9407 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9408 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9409 if (!inst
.operands
[1].isreg
)
9410 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
9412 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
9418 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9420 if (inst
.operands
[1].preind
)
9422 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9423 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9424 _("this instruction requires a post-indexed address"));
9426 inst
.operands
[1].preind
= 0;
9427 inst
.operands
[1].postind
= 1;
9428 inst
.operands
[1].writeback
= 1;
9430 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9431 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9434 /* Co-processor register load/store.
9435 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9439 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9440 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9441 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9447 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9448 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9449 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9450 && !(inst
.instruction
& 0x00400000))
9451 as_tsktsk (_("Rd and Rm should be different in mla"));
9453 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9454 inst
.instruction
|= inst
.operands
[1].reg
;
9455 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9456 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9462 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9463 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9465 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9466 encode_arm_shifter_operand (1);
9469 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9476 top
= (inst
.instruction
& 0x00400000) != 0;
9477 constraint (top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
,
9478 _(":lower16: not allowed in this instruction"));
9479 constraint (!top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
,
9480 _(":upper16: not allowed in this instruction"));
9481 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9482 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
9484 imm
= inst
.relocs
[0].exp
.X_add_number
;
9485 /* The value is in two pieces: 0:11, 16:19. */
9486 inst
.instruction
|= (imm
& 0x00000fff);
9487 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9492 do_vfp_nsyn_mrs (void)
9494 if (inst
.operands
[0].isvec
)
9496 if (inst
.operands
[1].reg
!= 1)
9497 first_error (_("operand 1 must be FPSCR"));
9498 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9499 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9500 do_vfp_nsyn_opcode ("fmstat");
9502 else if (inst
.operands
[1].isvec
)
9503 do_vfp_nsyn_opcode ("fmrx");
9511 do_vfp_nsyn_msr (void)
9513 if (inst
.operands
[0].isvec
)
9514 do_vfp_nsyn_opcode ("fmxr");
9524 unsigned Rt
= inst
.operands
[0].reg
;
9526 if (thumb_mode
&& Rt
== REG_SP
)
9528 inst
.error
= BAD_SP
;
9532 /* MVFR2 is only valid at ARMv8-A. */
9533 if (inst
.operands
[1].reg
== 5)
9534 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9537 /* APSR_ sets isvec. All other refs to PC are illegal. */
9538 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9540 inst
.error
= BAD_PC
;
9544 /* If we get through parsing the register name, we just insert the number
9545 generated into the instruction without further validation. */
9546 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9547 inst
.instruction
|= (Rt
<< 12);
9553 unsigned Rt
= inst
.operands
[1].reg
;
9556 reject_bad_reg (Rt
);
9557 else if (Rt
== REG_PC
)
9559 inst
.error
= BAD_PC
;
9563 /* MVFR2 is only valid for ARMv8-A. */
9564 if (inst
.operands
[0].reg
== 5)
9565 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9568 /* If we get through parsing the register name, we just insert the number
9569 generated into the instruction without further validation. */
9570 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9571 inst
.instruction
|= (Rt
<< 12);
9579 if (do_vfp_nsyn_mrs () == SUCCESS
)
9582 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9583 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9585 if (inst
.operands
[1].isreg
)
9587 br
= inst
.operands
[1].reg
;
9588 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf0000))
9589 as_bad (_("bad register for mrs"));
9593 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9594 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9596 _("'APSR', 'CPSR' or 'SPSR' expected"));
9597 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9600 inst
.instruction
|= br
;
9603 /* Two possible forms:
9604 "{C|S}PSR_<field>, Rm",
9605 "{C|S}PSR_f, #expression". */
9610 if (do_vfp_nsyn_msr () == SUCCESS
)
9613 inst
.instruction
|= inst
.operands
[0].imm
;
9614 if (inst
.operands
[1].isreg
)
9615 inst
.instruction
|= inst
.operands
[1].reg
;
9618 inst
.instruction
|= INST_IMMEDIATE
;
9619 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
9620 inst
.relocs
[0].pc_rel
= 0;
9627 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9629 if (!inst
.operands
[2].present
)
9630 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9631 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9632 inst
.instruction
|= inst
.operands
[1].reg
;
9633 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9635 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9636 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9637 as_tsktsk (_("Rd and Rm should be different in mul"));
9640 /* Long Multiply Parser
9641 UMULL RdLo, RdHi, Rm, Rs
9642 SMULL RdLo, RdHi, Rm, Rs
9643 UMLAL RdLo, RdHi, Rm, Rs
9644 SMLAL RdLo, RdHi, Rm, Rs. */
9649 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9650 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9651 inst
.instruction
|= inst
.operands
[2].reg
;
9652 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9654 /* rdhi and rdlo must be different. */
9655 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9656 as_tsktsk (_("rdhi and rdlo must be different"));
9658 /* rdhi, rdlo and rm must all be different before armv6. */
9659 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9660 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9661 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9662 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9668 if (inst
.operands
[0].present
9669 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9671 /* Architectural NOP hints are CPSR sets with no bits selected. */
9672 inst
.instruction
&= 0xf0000000;
9673 inst
.instruction
|= 0x0320f000;
9674 if (inst
.operands
[0].present
)
9675 inst
.instruction
|= inst
.operands
[0].imm
;
9679 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9680 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9681 Condition defaults to COND_ALWAYS.
9682 Error if Rd, Rn or Rm are R15. */
9687 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9688 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9689 inst
.instruction
|= inst
.operands
[2].reg
;
9690 if (inst
.operands
[3].present
)
9691 encode_arm_shift (3);
9694 /* ARM V6 PKHTB (Argument Parse). */
9699 if (!inst
.operands
[3].present
)
9701 /* If the shift specifier is omitted, turn the instruction
9702 into pkhbt rd, rm, rn. */
9703 inst
.instruction
&= 0xfff00010;
9704 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9705 inst
.instruction
|= inst
.operands
[1].reg
;
9706 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9710 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9711 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9712 inst
.instruction
|= inst
.operands
[2].reg
;
9713 encode_arm_shift (3);
9717 /* ARMv5TE: Preload-Cache
9718 MP Extensions: Preload for write
9722 Syntactically, like LDR with B=1, W=0, L=1. */
9727 constraint (!inst
.operands
[0].isreg
,
9728 _("'[' expected after PLD mnemonic"));
9729 constraint (inst
.operands
[0].postind
,
9730 _("post-indexed expression used in preload instruction"));
9731 constraint (inst
.operands
[0].writeback
,
9732 _("writeback used in preload instruction"));
9733 constraint (!inst
.operands
[0].preind
,
9734 _("unindexed addressing used in preload instruction"));
9735 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9738 /* ARMv7: PLI <addr_mode> */
9742 constraint (!inst
.operands
[0].isreg
,
9743 _("'[' expected after PLI mnemonic"));
9744 constraint (inst
.operands
[0].postind
,
9745 _("post-indexed expression used in preload instruction"));
9746 constraint (inst
.operands
[0].writeback
,
9747 _("writeback used in preload instruction"));
9748 constraint (!inst
.operands
[0].preind
,
9749 _("unindexed addressing used in preload instruction"));
9750 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9751 inst
.instruction
&= ~PRE_INDEX
;
9757 constraint (inst
.operands
[0].writeback
,
9758 _("push/pop do not support {reglist}^"));
9759 inst
.operands
[1] = inst
.operands
[0];
9760 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9761 inst
.operands
[0].isreg
= 1;
9762 inst
.operands
[0].writeback
= 1;
9763 inst
.operands
[0].reg
= REG_SP
;
9764 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9767 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9768 word at the specified address and the following word
9770 Unconditionally executed.
9771 Error if Rn is R15. */
9776 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9777 if (inst
.operands
[0].writeback
)
9778 inst
.instruction
|= WRITE_BACK
;
9781 /* ARM V6 ssat (argument parse). */
9786 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9787 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9788 inst
.instruction
|= inst
.operands
[2].reg
;
9790 if (inst
.operands
[3].present
)
9791 encode_arm_shift (3);
9794 /* ARM V6 usat (argument parse). */
9799 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9800 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9801 inst
.instruction
|= inst
.operands
[2].reg
;
9803 if (inst
.operands
[3].present
)
9804 encode_arm_shift (3);
9807 /* ARM V6 ssat16 (argument parse). */
9812 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9813 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9814 inst
.instruction
|= inst
.operands
[2].reg
;
9820 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9821 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9822 inst
.instruction
|= inst
.operands
[2].reg
;
9825 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9826 preserving the other bits.
9828 setend <endian_specifier>, where <endian_specifier> is either
9834 if (warn_on_deprecated
9835 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9836 as_tsktsk (_("setend use is deprecated for ARMv8"));
9838 if (inst
.operands
[0].imm
)
9839 inst
.instruction
|= 0x200;
9845 unsigned int Rm
= (inst
.operands
[1].present
9846 ? inst
.operands
[1].reg
9847 : inst
.operands
[0].reg
);
9849 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9850 inst
.instruction
|= Rm
;
9851 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9853 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9854 inst
.instruction
|= SHIFT_BY_REG
;
9855 /* PR 12854: Error on extraneous shifts. */
9856 constraint (inst
.operands
[2].shifted
,
9857 _("extraneous shift as part of operand to shift insn"));
9860 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
9866 inst
.relocs
[0].type
= BFD_RELOC_ARM_SMC
;
9867 inst
.relocs
[0].pc_rel
= 0;
9873 inst
.relocs
[0].type
= BFD_RELOC_ARM_HVC
;
9874 inst
.relocs
[0].pc_rel
= 0;
9880 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
9881 inst
.relocs
[0].pc_rel
= 0;
9887 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9888 _("selected processor does not support SETPAN instruction"));
9890 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9896 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9897 _("selected processor does not support SETPAN instruction"));
9899 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9902 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9903 SMLAxy{cond} Rd,Rm,Rs,Rn
9904 SMLAWy{cond} Rd,Rm,Rs,Rn
9905 Error if any register is R15. */
9910 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9911 inst
.instruction
|= inst
.operands
[1].reg
;
9912 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9913 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9916 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9917 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9918 Error if any register is R15.
9919 Warning if Rdlo == Rdhi. */
9924 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9925 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9926 inst
.instruction
|= inst
.operands
[2].reg
;
9927 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9929 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9930 as_tsktsk (_("rdhi and rdlo must be different"));
9933 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9934 SMULxy{cond} Rd,Rm,Rs
9935 Error if any register is R15. */
9940 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9941 inst
.instruction
|= inst
.operands
[1].reg
;
9942 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9945 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9946 the same for both ARM and Thumb-2. */
9953 if (inst
.operands
[0].present
)
9955 reg
= inst
.operands
[0].reg
;
9956 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9961 inst
.instruction
|= reg
<< 16;
9962 inst
.instruction
|= inst
.operands
[1].imm
;
9963 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9964 inst
.instruction
|= WRITE_BACK
;
9967 /* ARM V6 strex (argument parse). */
9972 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9973 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9974 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9975 || inst
.operands
[2].negative
9976 /* See comment in do_ldrex(). */
9977 || (inst
.operands
[2].reg
== REG_PC
),
9980 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9981 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9983 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9984 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9985 _("offset must be zero in ARM encoding"));
9987 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9988 inst
.instruction
|= inst
.operands
[1].reg
;
9989 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9990 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9996 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9997 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9998 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9999 || inst
.operands
[2].negative
,
10002 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10003 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10011 constraint (inst
.operands
[1].reg
% 2 != 0,
10012 _("even register required"));
10013 constraint (inst
.operands
[2].present
10014 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
10015 _("can only store two consecutive registers"));
10016 /* If op 2 were present and equal to PC, this function wouldn't
10017 have been called in the first place. */
10018 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
10020 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10021 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
10022 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
10025 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10026 inst
.instruction
|= inst
.operands
[1].reg
;
10027 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
10034 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10035 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10043 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10044 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10049 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10050 extends it to 32-bits, and adds the result to a value in another
10051 register. You can specify a rotation by 0, 8, 16, or 24 bits
10052 before extracting the 16-bit value.
10053 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10054 Condition defaults to COND_ALWAYS.
10055 Error if any register uses R15. */
10060 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10061 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10062 inst
.instruction
|= inst
.operands
[2].reg
;
10063 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
10068 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10069 Condition defaults to COND_ALWAYS.
10070 Error if any register uses R15. */
10075 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10076 inst
.instruction
|= inst
.operands
[1].reg
;
10077 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
10080 /* VFP instructions. In a logical order: SP variant first, monad
10081 before dyad, arithmetic then move then load/store. */
/* Encode a VFP single-precision monadic (two-operand) instruction:
   operand 0 -> Sd, operand 1 -> Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
/* Encode a VFP single-precision dyadic (three-operand) instruction:
   operand 0 -> Sd, operand 1 -> Sn, operand 2 -> Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
/* Encode a VFP single-precision compare-with-zero: only Sd is encoded;
   the zero comparand is implicit in the opcode.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
/* Single -> double conversion: destination is a D register (Dd),
   source a single-precision register (Sm).  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
/* Double -> single conversion: destination is a single-precision
   register (Sd), source a D register (Dm).  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
/* Move from a VFP single register to an ARM core register:
   operand 0 is the ARM register (Rt field, bits 12-15), operand 1
   the source Sn.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
/* Move a pair of VFP single registers to two ARM core registers
   (e.g. FMRRS).  Operand 2 is a register list whose .imm holds the
   element count; exactly two consecutive SP registers are allowed,
   and only the first (Sm) is encoded.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rt2.  */
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
/* Move from an ARM core register to a VFP single register:
   operand 0 is the destination Sn, operand 1 the ARM register
   (bits 12-15).  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
/* Move two ARM core registers to a pair of VFP single registers
   (e.g. FMSRR).  Operand 0 is the register list (must name exactly
   two consecutive SP registers; only the first, Sm, is encoded).  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rt2.  */
}
/* VFP single-precision load/store (FLDS/FSTS): Sd plus a coprocessor
   addressing mode taken from operand 1.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
/* VFP double-precision load/store (FLDD/FSTD): Dd plus a coprocessor
   addressing mode taken from operand 1.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
/* Common encoder for VFP single-precision load/store multiple.
   Operand 0 is the base register (with optional writeback), operand 1
   the register list (reg = first Sd, imm = count).  Without writeback
   only the IA addressing mode is legal.
   NOTE(review): the `else` joining the writeback test to the
   constraint was lost in extraction and restored from context —
   verify against upstream.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;	/* Rn.  */
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;		/* Register count.  */
}
/* Common encoder for VFP double-precision load/store multiple.  The
   offset field counts words, so the D-register count is doubled; the
   FLDMX/FSTMX ("X") forms use an odd word count.
   NOTE(review): the `else`, the `count` declaration and the
   `count += 1` in the X-form branch were lost in extraction and
   restored from context — verify against upstream.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;	/* Rn.  */
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;	/* Words, not D registers.  */
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;				/* Odd count marks the X form.  */

  inst.instruction |= count;
}
/* One-line dispatch wrappers: each load/store-multiple mnemonic
   selects the addressing-mode variant for the shared encoders above.
   The "xp" forms use the FLDMX/FSTMX long (odd-count) format.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
/* VFP double-precision register-field encoders.  Each function's name
   lists the D-register fields filled, in operand order.  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10280 /* VFPv3 instructions. */
/* VFPv3 VMOV-immediate.  The 8-bit modified-immediate encoding is
   split: high nibble -> bits 16-19, low nibble -> bits 0-3.  */

static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
/* Shared encoder for VFPv3 fixed-point conversions.  The operand is
   the number of fraction bits; the instruction field holds
   (srcsize - fbits), split across bit 5 (lsb) and bits 0-3.
   SRCSIZE is 16 or 32.
   NOTE(review): the `return` after each error assignment was lost in
   extraction and restored from context — verify against upstream.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
/* Fixed-point conversion wrappers: encode the destination register,
   then delegate the fraction-bits field to vfp_conv.
   NOTE(review): the vfp_conv calls were lost in extraction and
   restored from context — verify against upstream.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10349 /* FPA instructions. Also in a logical order. */
10354 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10355 inst
.instruction
|= inst
.operands
[1].reg
;
/* FPA load/store multiple (LFM/SFM).  Operand 1 is the register
   count (1-3, encoded in the CP_T_X/CP_T_Y bits); operand 2 the
   address.  The "ea"/"fd" stack forms are emulated by synthesising
   the offset, since the hardware has no real stacking modes.
   NOTE(review): the `default: abort ();` arm of the switch and the
   surrounding braces were lost in extraction and restored from
   context — verify against upstream.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register is 12 bytes; pre-index (or writeback) needs
	 the full transfer size as the offset.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      /* Descending post-increment with writeback is expressed as a
	 post-indexed access.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10397 /* iWMMXt instructions: strictly in alphabetical order. */
/* iWMMXt TANDC/TORC: the destination must be written as r15; nothing
   further needs encoding.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
/* Simple iWMMXt field encoders; each comment gives the operand ->
   bit-field mapping used.  */

/* TEXTRC: Rd in bits 12-15, lane immediate in bits 0-3.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

/* TEXTRM: Rd in bits 12-15, wRn in bits 16-19, lane in bits 0-3.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

/* TINSR: wRd in bits 16-19, Rn in bits 12-15, lane in bits 0-3.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

/* TMIA: accumulator in bits 5-7, Rm in bits 0-3, Rs in bits 12-15.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
/* WALIGNI: wRd, wRn, wRm plus a 3-bit alignment immediate in
   bits 20-22.  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

/* WMERGE: like WALIGNI but the immediate sits in bits 21-23.  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;	/* Source repeated as wRm.  */
}
/* iWMMXt byte/halfword load/store: pick the scaled-by-2 coprocessor
   offset relocation appropriate for the current instruction set.
   NOTE(review): the reloc declaration and the thumb/ARM test were
   lost in extraction and restored from context — verify against
   upstream.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
/* iWMMXt word load/store.  A control-register form must be
   unconditional (cond field forced to 0xf).  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
/* iWMMXt doubleword load/store.  On iWMMXt2 a register-offset form
   exists; it is re-encoded by hand here (clear the coprocessor
   addressing bits, force the unconditional prefix, then set the
   index/up/writeback flags and the offset fields individually).
   Otherwise fall back to the generic coprocessor address encoder.
   NOTE(review): the `else` before the fallback call was lost in
   extraction and restored from context — verify against upstream.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
/* WSHUFH: the 8-bit shuffle immediate is split, high nibble into
   bits 20-23 and low nibble into bits 0-3.  */
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}

static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
/* iWMMXt shift instructions taking either three wR registers or an
   iWMMXt2 5-bit immediate.  A zero immediate is not encodable
   directly, so it is rewritten as an equivalent instruction (rotate
   by the full element width, or WOR for the doubleword forms).
   NOTE(review): the helper-dispatch calls and the switch case labels
   were lost in extraction and restored from context — verify against
   upstream.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		  _("immediate operand requires iWMMXt2"));
      do_rd_rn ();
      if (inst.operands[2].imm == 0)
	{
	  switch ((inst.instruction >> 20) & 0xf)
	    {
	    case 4:
	    case 5:
	    case 6:
	    case 7:
	      /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	      inst.operands[2].imm = 16;
	      inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	      break;
	    case 8:
	    case 9:
	    case 10:
	    case 11:
	      /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	      inst.operands[2].imm = 32;
	      inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	      break;
	    case 12:
	    case 13:
	    case 14:
	    case 15:
	      {
		/* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
		unsigned long wrn;
		wrn = (inst.instruction >> 16) & 0xf;
		inst.instruction &= 0xff0fff0f;
		inst.instruction |= wrn;
		/* Bail out here; the instruction is now assembled.  */
		return;
	      }
	    }
	}
      /* Map 32 -> 0, etc.  */
      inst.operands[2].imm &= 0x1f;
      inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4)
			  | (inst.operands[2].imm & 0xf);
    }
}
10580 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10581 operations first, then control, shift, and load/store. */
/* Insns like "foo X,Y,Z".  Operand 0 -> bits 16-19, operand 1 ->
   bits 0-3, operand 2 -> bits 12-15.  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10593 /* Insns like "foo W,X,Y,Z".
10594 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10599 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10600 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10601 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10602 inst
.instruction
|= inst
.operands
[3].reg
;
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  Only the source register needs
   encoding; DSPSC is implicit in the opcode.  */

static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}
/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
10632 /* XScale instructions. Also sorted arithmetic before move. */
10634 /* Xscale multiply-accumulate (argument parse)
10637 MIAxycc acc0,Rm,Rs. */
10642 inst
.instruction
|= inst
.operands
[1].reg
;
10643 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10646 /* Xscale move-accumulator-register (argument parse)
10648 MARcc acc0,RdLo,RdHi. */
10653 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10654 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10657 /* Xscale move-register-accumulator (argument parse)
10659 MRAcc RdLo,RdHi,acc0. */
10664 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10665 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10666 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10669 /* Encoding functions relevant only to Thumb. */
/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.
   NOTE(review): the `else` around the constant-shift path and the
   value==0 -> LSL normalisation were lost in extraction and restored
   from context — verify against upstream.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	/* LSR/ASR #32 are encoded with a zero amount.  */
	value = 0;

      inst.instruction |= shift << 4;
      /* The 5-bit amount is split: imm3 in bits 12-14, imm2 in 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.
   NOTE(review): the is_d/else branch structure in the pre- and
   post-indexed paths was lost in extraction and restored from
   context — verify against upstream.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #0-3}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;	/* Rm.  */
      if (inst.operands[i].shifted)
	{
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* Doubleword form: P bit plus optional W bit.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Single-register imm8 form: PU bits plus optional W bit.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10786 /* Table of Thumb instructions which exist in both 16- and 32-bit
10787 encodings (the latter only in post-V6T2 cores). The index is the
10788 value used in the insns table below. When there is more than one
10789 possible 16-bit encoding for the instruction, this table always
10791 Also contains several pseudo-instructions used during relaxation. */
10792 #define T16_32_TAB \
10793 X(_adc, 4140, eb400000), \
10794 X(_adcs, 4140, eb500000), \
10795 X(_add, 1c00, eb000000), \
10796 X(_adds, 1c00, eb100000), \
10797 X(_addi, 0000, f1000000), \
10798 X(_addis, 0000, f1100000), \
10799 X(_add_pc,000f, f20f0000), \
10800 X(_add_sp,000d, f10d0000), \
10801 X(_adr, 000f, f20f0000), \
10802 X(_and, 4000, ea000000), \
10803 X(_ands, 4000, ea100000), \
10804 X(_asr, 1000, fa40f000), \
10805 X(_asrs, 1000, fa50f000), \
10806 X(_b, e000, f000b000), \
10807 X(_bcond, d000, f0008000), \
10808 X(_bf, 0000, f040e001), \
10809 X(_bfcsel,0000, f000e001), \
10810 X(_bfx, 0000, f060e001), \
10811 X(_bfl, 0000, f000c001), \
10812 X(_bflx, 0000, f070e001), \
10813 X(_bic, 4380, ea200000), \
10814 X(_bics, 4380, ea300000), \
10815 X(_cmn, 42c0, eb100f00), \
10816 X(_cmp, 2800, ebb00f00), \
10817 X(_cpsie, b660, f3af8400), \
10818 X(_cpsid, b670, f3af8600), \
10819 X(_cpy, 4600, ea4f0000), \
10820 X(_dec_sp,80dd, f1ad0d00), \
10821 X(_dls, 0000, f040e001), \
10822 X(_eor, 4040, ea800000), \
10823 X(_eors, 4040, ea900000), \
10824 X(_inc_sp,00dd, f10d0d00), \
10825 X(_ldmia, c800, e8900000), \
10826 X(_ldr, 6800, f8500000), \
10827 X(_ldrb, 7800, f8100000), \
10828 X(_ldrh, 8800, f8300000), \
10829 X(_ldrsb, 5600, f9100000), \
10830 X(_ldrsh, 5e00, f9300000), \
10831 X(_ldr_pc,4800, f85f0000), \
10832 X(_ldr_pc2,4800, f85f0000), \
10833 X(_ldr_sp,9800, f85d0000), \
10834 X(_le, 0000, f00fc001), \
10835 X(_lsl, 0000, fa00f000), \
10836 X(_lsls, 0000, fa10f000), \
10837 X(_lsr, 0800, fa20f000), \
10838 X(_lsrs, 0800, fa30f000), \
10839 X(_mov, 2000, ea4f0000), \
10840 X(_movs, 2000, ea5f0000), \
10841 X(_mul, 4340, fb00f000), \
10842 X(_muls, 4340, ffffffff), /* no 32b muls */ \
10843 X(_mvn, 43c0, ea6f0000), \
10844 X(_mvns, 43c0, ea7f0000), \
10845 X(_neg, 4240, f1c00000), /* rsb #0 */ \
10846 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
10847 X(_orr, 4300, ea400000), \
10848 X(_orrs, 4300, ea500000), \
10849 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
10850 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
10851 X(_rev, ba00, fa90f080), \
10852 X(_rev16, ba40, fa90f090), \
10853 X(_revsh, bac0, fa90f0b0), \
10854 X(_ror, 41c0, fa60f000), \
10855 X(_rors, 41c0, fa70f000), \
10856 X(_sbc, 4180, eb600000), \
10857 X(_sbcs, 4180, eb700000), \
10858 X(_stmia, c000, e8800000), \
10859 X(_str, 6000, f8400000), \
10860 X(_strb, 7000, f8000000), \
10861 X(_strh, 8000, f8200000), \
10862 X(_str_sp,9000, f84d0000), \
10863 X(_sub, 1e00, eba00000), \
10864 X(_subs, 1e00, ebb00000), \
10865 X(_subi, 8000, f1a00000), \
10866 X(_subis, 8000, f1b00000), \
10867 X(_sxtb, b240, fa4ff080), \
10868 X(_sxth, b200, fa0ff080), \
10869 X(_tst, 4200, ea100f00), \
10870 X(_uxtb, b2c0, fa5ff080), \
10871 X(_uxth, b280, fa1ff080), \
10872 X(_nop, bf00, f3af8000), \
10873 X(_yield, bf10, f3af8001), \
10874 X(_wfe, bf20, f3af8002), \
10875 X(_wfi, bf30, f3af8003), \
10876 X(_wls, 0000, f040c001), \
10877 X(_sev, bf40, f3af8004), \
10878 X(_sevl, bf50, f3af8005), \
10879 X(_udf, de00, f7f0a000)
10881 /* To catch errors in encoding functions, the codes are all offset by
10882 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
10883 as 16-bit instructions. */
10884 #define X(a,b,c) T_MNEM##a
10885 enum t16_32_codes
{ T16_32_OFFSET
= 0xF7FF, T16_32_TAB
};
10888 #define X(a,b,c) 0x##b
10889 static const unsigned short thumb_op16
[] = { T16_32_TAB
};
10890 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
10893 #define X(a,b,c) 0x##c
10894 static const unsigned int thumb_op32
[] = { T16_32_TAB
};
10895 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
10896 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
10900 /* Thumb instruction encoders, in alphabetical order. */
/* ADDW or SUBW.  Rd in bits 8-11, Rn in bits 16-19, with a 12-bit
   immediate fixed up later via BFD_RELOC_ARM_T32_IMM12.
   NOTE(review): the Rd/Rn declarations and the `if (Rn == REG_SP)` /
   `else` around the two register checks were lost in extraction and
   restored from context — verify against upstream.  */

static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
     is the SP-{plus,minus}-immediate form of the instruction.  */
  if (Rn == REG_SP)
    constraint (Rd == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rd);

  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
}
10923 /* Parse an add or subtract instruction. We get here with inst.instruction
10924 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10927 do_t_add_sub (void)
10931 Rd
= inst
.operands
[0].reg
;
10932 Rs
= (inst
.operands
[1].present
10933 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10934 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10937 set_pred_insn_type_last ();
10939 if (unified_syntax
)
10942 bfd_boolean narrow
;
10945 flags
= (inst
.instruction
== T_MNEM_adds
10946 || inst
.instruction
== T_MNEM_subs
);
10948 narrow
= !in_pred_block ();
10950 narrow
= in_pred_block ();
10951 if (!inst
.operands
[2].isreg
)
10955 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10956 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10958 add
= (inst
.instruction
== T_MNEM_add
10959 || inst
.instruction
== T_MNEM_adds
);
10961 if (inst
.size_req
!= 4)
10963 /* Attempt to use a narrow opcode, with relaxation if
10965 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10966 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10967 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10968 opcode
= T_MNEM_add_sp
;
10969 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10970 opcode
= T_MNEM_add_pc
;
10971 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10974 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10976 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10980 inst
.instruction
= THUMB_OP16(opcode
);
10981 inst
.instruction
|= (Rd
<< 4) | Rs
;
10982 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10983 || (inst
.relocs
[0].type
10984 > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
))
10986 if (inst
.size_req
== 2)
10987 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
10989 inst
.relax
= opcode
;
10993 constraint (inst
.size_req
== 2, BAD_HIREG
);
10995 if (inst
.size_req
== 4
10996 || (inst
.size_req
!= 2 && !opcode
))
10998 constraint ((inst
.relocs
[0].type
10999 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
11000 && (inst
.relocs
[0].type
11001 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
11002 THUMB1_RELOC_ONLY
);
11005 constraint (add
, BAD_PC
);
11006 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
11007 _("only SUBS PC, LR, #const allowed"));
11008 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
11009 _("expression too complex"));
11010 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
11011 || inst
.relocs
[0].exp
.X_add_number
> 0xff,
11012 _("immediate value out of range"));
11013 inst
.instruction
= T2_SUBS_PC_LR
11014 | inst
.relocs
[0].exp
.X_add_number
;
11015 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
11018 else if (Rs
== REG_PC
)
11020 /* Always use addw/subw. */
11021 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
11022 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
11026 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11027 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
11030 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11032 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_IMM
;
11034 inst
.instruction
|= Rd
<< 8;
11035 inst
.instruction
|= Rs
<< 16;
11040 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
11041 unsigned int shift
= inst
.operands
[2].shift_kind
;
11043 Rn
= inst
.operands
[2].reg
;
11044 /* See if we can do this with a 16-bit instruction. */
11045 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
11047 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11052 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
11053 || inst
.instruction
== T_MNEM_add
)
11055 : T_OPCODE_SUB_R3
);
11056 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11060 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
11062 /* Thumb-1 cores (except v6-M) require at least one high
11063 register in a narrow non flag setting add. */
11064 if (Rd
> 7 || Rn
> 7
11065 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
11066 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
11073 inst
.instruction
= T_OPCODE_ADD_HI
;
11074 inst
.instruction
|= (Rd
& 8) << 4;
11075 inst
.instruction
|= (Rd
& 7);
11076 inst
.instruction
|= Rn
<< 3;
11082 constraint (Rd
== REG_PC
, BAD_PC
);
11083 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
11084 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
11085 constraint (Rs
== REG_PC
, BAD_PC
);
11086 reject_bad_reg (Rn
);
11088 /* If we get here, it can't be done in 16 bits. */
11089 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
11090 _("shift must be constant"));
11091 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11092 inst
.instruction
|= Rd
<< 8;
11093 inst
.instruction
|= Rs
<< 16;
11094 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
11095 _("shift value over 3 not allowed in thumb mode"));
11096 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
11097 _("only LSL shift allowed in thumb mode"));
11098 encode_thumb32_shifted_operand (2);
11103 constraint (inst
.instruction
== T_MNEM_adds
11104 || inst
.instruction
== T_MNEM_subs
,
11107 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
11109 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
11110 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
11113 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11114 ? 0x0000 : 0x8000);
11115 inst
.instruction
|= (Rd
<< 4) | Rs
;
11116 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11120 Rn
= inst
.operands
[2].reg
;
11121 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
11123 /* We now have Rd, Rs, and Rn set to registers. */
11124 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11126 /* Can't do this for SUB. */
11127 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
11128 inst
.instruction
= T_OPCODE_ADD_HI
;
11129 inst
.instruction
|= (Rd
& 8) << 4;
11130 inst
.instruction
|= (Rd
& 7);
11132 inst
.instruction
|= Rn
<< 3;
11134 inst
.instruction
|= Rs
<< 3;
11136 constraint (1, _("dest must overlap one source register"));
11140 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11141 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
11142 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11152 Rd
= inst
.operands
[0].reg
;
11153 reject_bad_reg (Rd
);
11155 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
11157 /* Defer to section relaxation. */
11158 inst
.relax
= inst
.instruction
;
11159 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11160 inst
.instruction
|= Rd
<< 4;
11162 else if (unified_syntax
&& inst
.size_req
!= 2)
11164 /* Generate a 32-bit opcode. */
11165 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11166 inst
.instruction
|= Rd
<< 8;
11167 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_PC12
;
11168 inst
.relocs
[0].pc_rel
= 1;
11172 /* Generate a 16-bit opcode. */
11173 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11174 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11175 inst
.relocs
[0].exp
.X_add_number
-= 4; /* PC relative adjust. */
11176 inst
.relocs
[0].pc_rel
= 1;
11177 inst
.instruction
|= Rd
<< 4;
11180 if (inst
.relocs
[0].exp
.X_op
== O_symbol
11181 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11182 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11183 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11184 inst
.relocs
[0].exp
.X_add_number
+= 1;
11187 /* Arithmetic instructions for which there is just one 16-bit
11188 instruction encoding, and it allows only two low registers.
11189 For maximal compatibility with ARM syntax, we allow three register
11190 operands even when Thumb-32 instructions are not available, as long
11191 as the first two are identical. For instance, both "sbc r0,r1" and
11192 "sbc r0,r0,r1" are allowed. */
11198 Rd
= inst
.operands
[0].reg
;
11199 Rs
= (inst
.operands
[1].present
11200 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11201 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11202 Rn
= inst
.operands
[2].reg
;
11204 reject_bad_reg (Rd
);
11205 reject_bad_reg (Rs
);
11206 if (inst
.operands
[2].isreg
)
11207 reject_bad_reg (Rn
);
11209 if (unified_syntax
)
11211 if (!inst
.operands
[2].isreg
)
11213 /* For an immediate, we always generate a 32-bit opcode;
11214 section relaxation will shrink it later if possible. */
11215 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11216 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11217 inst
.instruction
|= Rd
<< 8;
11218 inst
.instruction
|= Rs
<< 16;
11219 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11223 bfd_boolean narrow
;
11225 /* See if we can do this with a 16-bit instruction. */
11226 if (THUMB_SETS_FLAGS (inst
.instruction
))
11227 narrow
= !in_pred_block ();
11229 narrow
= in_pred_block ();
11231 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11233 if (inst
.operands
[2].shifted
)
11235 if (inst
.size_req
== 4)
11241 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11242 inst
.instruction
|= Rd
;
11243 inst
.instruction
|= Rn
<< 3;
11247 /* If we get here, it can't be done in 16 bits. */
11248 constraint (inst
.operands
[2].shifted
11249 && inst
.operands
[2].immisreg
,
11250 _("shift must be constant"));
11251 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11252 inst
.instruction
|= Rd
<< 8;
11253 inst
.instruction
|= Rs
<< 16;
11254 encode_thumb32_shifted_operand (2);
11259 /* On its face this is a lie - the instruction does set the
11260 flags. However, the only supported mnemonic in this mode
11261 says it doesn't. */
11262 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11264 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11265 _("unshifted register required"));
11266 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11267 constraint (Rd
!= Rs
,
11268 _("dest and source1 must be the same register"));
11270 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11271 inst
.instruction
|= Rd
;
11272 inst
.instruction
|= Rn
<< 3;
11276 /* Similarly, but for instructions where the arithmetic operation is
11277 commutative, so we can allow either of them to be different from
11278 the destination operand in a 16-bit instruction. For instance, all
11279 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11286 Rd
= inst
.operands
[0].reg
;
11287 Rs
= (inst
.operands
[1].present
11288 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11289 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11290 Rn
= inst
.operands
[2].reg
;
11292 reject_bad_reg (Rd
);
11293 reject_bad_reg (Rs
);
11294 if (inst
.operands
[2].isreg
)
11295 reject_bad_reg (Rn
);
11297 if (unified_syntax
)
11299 if (!inst
.operands
[2].isreg
)
11301 /* For an immediate, we always generate a 32-bit opcode;
11302 section relaxation will shrink it later if possible. */
11303 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11304 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11305 inst
.instruction
|= Rd
<< 8;
11306 inst
.instruction
|= Rs
<< 16;
11307 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11311 bfd_boolean narrow
;
11313 /* See if we can do this with a 16-bit instruction. */
11314 if (THUMB_SETS_FLAGS (inst
.instruction
))
11315 narrow
= !in_pred_block ();
11317 narrow
= in_pred_block ();
11319 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11321 if (inst
.operands
[2].shifted
)
11323 if (inst
.size_req
== 4)
11330 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11331 inst
.instruction
|= Rd
;
11332 inst
.instruction
|= Rn
<< 3;
11337 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11338 inst
.instruction
|= Rd
;
11339 inst
.instruction
|= Rs
<< 3;
11344 /* If we get here, it can't be done in 16 bits. */
11345 constraint (inst
.operands
[2].shifted
11346 && inst
.operands
[2].immisreg
,
11347 _("shift must be constant"));
11348 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11349 inst
.instruction
|= Rd
<< 8;
11350 inst
.instruction
|= Rs
<< 16;
11351 encode_thumb32_shifted_operand (2);
11356 /* On its face this is a lie - the instruction does set the
11357 flags. However, the only supported mnemonic in this mode
11358 says it doesn't. */
11359 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11361 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11362 _("unshifted register required"));
11363 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11365 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11366 inst
.instruction
|= Rd
;
11369 inst
.instruction
|= Rn
<< 3;
11371 inst
.instruction
|= Rs
<< 3;
11373 constraint (1, _("dest must overlap one source register"));
11381 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
11382 constraint (msb
> 32, _("bit-field extends past end of register"));
11383 /* The instruction encoding stores the LSB and MSB,
11384 not the LSB and width. */
11385 Rd
= inst
.operands
[0].reg
;
11386 reject_bad_reg (Rd
);
11387 inst
.instruction
|= Rd
<< 8;
11388 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
11389 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
11390 inst
.instruction
|= msb
- 1;
11399 Rd
= inst
.operands
[0].reg
;
11400 reject_bad_reg (Rd
);
11402 /* #0 in second position is alternative syntax for bfc, which is
11403 the same instruction but with REG_PC in the Rm field. */
11404 if (!inst
.operands
[1].isreg
)
11408 Rn
= inst
.operands
[1].reg
;
11409 reject_bad_reg (Rn
);
11412 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
11413 constraint (msb
> 32, _("bit-field extends past end of register"));
11414 /* The instruction encoding stores the LSB and MSB,
11415 not the LSB and width. */
11416 inst
.instruction
|= Rd
<< 8;
11417 inst
.instruction
|= Rn
<< 16;
11418 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11419 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11420 inst
.instruction
|= msb
- 1;
11428 Rd
= inst
.operands
[0].reg
;
11429 Rn
= inst
.operands
[1].reg
;
11431 reject_bad_reg (Rd
);
11432 reject_bad_reg (Rn
);
11434 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
11435 _("bit-field extends past end of register"));
11436 inst
.instruction
|= Rd
<< 8;
11437 inst
.instruction
|= Rn
<< 16;
11438 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11439 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11440 inst
.instruction
|= inst
.operands
[3].imm
- 1;
11443 /* ARM V5 Thumb BLX (argument parse)
11444 BLX <target_addr> which is BLX(1)
11445 BLX <Rm> which is BLX(2)
11446 Unfortunately, there are two different opcodes for this mnemonic.
11447 So, the insns[].value is not used, and the code here zaps values
11448 into inst.instruction.
11450 ??? How to take advantage of the additional two bits of displacement
11451 available in Thumb32 mode? Need new relocation? */
11456 set_pred_insn_type_last ();
11458 if (inst
.operands
[0].isreg
)
11460 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11461 /* We have a register, so this is BLX(2). */
11462 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11466 /* No register. This must be BLX(1). */
11467 inst
.instruction
= 0xf000e800;
11468 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11477 bfd_reloc_code_real_type reloc
;
11480 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN
);
11482 if (in_pred_block ())
11484 /* Conditional branches inside IT blocks are encoded as unconditional
11486 cond
= COND_ALWAYS
;
11491 if (cond
!= COND_ALWAYS
)
11492 opcode
= T_MNEM_bcond
;
11494 opcode
= inst
.instruction
;
11497 && (inst
.size_req
== 4
11498 || (inst
.size_req
!= 2
11499 && (inst
.operands
[0].hasreloc
11500 || inst
.relocs
[0].exp
.X_op
== O_constant
))))
11502 inst
.instruction
= THUMB_OP32(opcode
);
11503 if (cond
== COND_ALWAYS
)
11504 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11507 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11508 _("selected architecture does not support "
11509 "wide conditional branch instruction"));
11511 gas_assert (cond
!= 0xF);
11512 inst
.instruction
|= cond
<< 22;
11513 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11518 inst
.instruction
= THUMB_OP16(opcode
);
11519 if (cond
== COND_ALWAYS
)
11520 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11523 inst
.instruction
|= cond
<< 8;
11524 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11526 /* Allow section relaxation. */
11527 if (unified_syntax
&& inst
.size_req
!= 2)
11528 inst
.relax
= opcode
;
11530 inst
.relocs
[0].type
= reloc
;
11531 inst
.relocs
[0].pc_rel
= 1;
11534 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11535 between the two is the maximum immediate allowed - which is passed in
11538 do_t_bkpt_hlt1 (int range
)
11540 constraint (inst
.cond
!= COND_ALWAYS
,
11541 _("instruction is always unconditional"));
11542 if (inst
.operands
[0].present
)
11544 constraint (inst
.operands
[0].imm
> range
,
11545 _("immediate value out of range"));
11546 inst
.instruction
|= inst
.operands
[0].imm
;
11549 set_pred_insn_type (NEUTRAL_IT_INSN
);
11555 do_t_bkpt_hlt1 (63);
11561 do_t_bkpt_hlt1 (255);
11565 do_t_branch23 (void)
11567 set_pred_insn_type_last ();
11568 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11570 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11571 this file. We used to simply ignore the PLT reloc type here --
11572 the branch encoding is now needed to deal with TLSCALL relocs.
11573 So if we see a PLT reloc now, put it back to how it used to be to
11574 keep the preexisting behaviour. */
11575 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_PLT32
)
11576 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11578 #if defined(OBJ_COFF)
11579 /* If the destination of the branch is a defined symbol which does not have
11580 the THUMB_FUNC attribute, then we must be calling a function which has
11581 the (interfacearm) attribute. We look for the Thumb entry point to that
11582 function and change the branch to refer to that function instead. */
11583 if ( inst
.relocs
[0].exp
.X_op
== O_symbol
11584 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11585 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11586 && ! THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11587 inst
.relocs
[0].exp
.X_add_symbol
11588 = find_real_start (inst
.relocs
[0].exp
.X_add_symbol
);
11595 set_pred_insn_type_last ();
11596 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11597 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11598 should cause the alignment to be checked once it is known. This is
11599 because BX PC only works if the instruction is word aligned. */
11607 set_pred_insn_type_last ();
11608 Rm
= inst
.operands
[0].reg
;
11609 reject_bad_reg (Rm
);
11610 inst
.instruction
|= Rm
<< 16;
11619 Rd
= inst
.operands
[0].reg
;
11620 Rm
= inst
.operands
[1].reg
;
11622 reject_bad_reg (Rd
);
11623 reject_bad_reg (Rm
);
11625 inst
.instruction
|= Rd
<< 8;
11626 inst
.instruction
|= Rm
<< 16;
11627 inst
.instruction
|= Rm
;
11633 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11639 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11640 inst
.instruction
|= inst
.operands
[0].imm
;
11646 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11648 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11649 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11651 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11652 inst
.instruction
= 0xf3af8000;
11653 inst
.instruction
|= imod
<< 9;
11654 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11655 if (inst
.operands
[1].present
)
11656 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11660 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11661 && (inst
.operands
[0].imm
& 4),
11662 _("selected processor does not support 'A' form "
11663 "of this instruction"));
11664 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11665 _("Thumb does not support the 2-argument "
11666 "form of this instruction"));
11667 inst
.instruction
|= inst
.operands
[0].imm
;
11671 /* THUMB CPY instruction (argument parse). */
11676 if (inst
.size_req
== 4)
11678 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11679 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11680 inst
.instruction
|= inst
.operands
[1].reg
;
11684 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11685 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11686 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11693 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11694 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11695 inst
.instruction
|= inst
.operands
[0].reg
;
11696 inst
.relocs
[0].pc_rel
= 1;
11697 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11703 inst
.instruction
|= inst
.operands
[0].imm
;
11709 unsigned Rd
, Rn
, Rm
;
11711 Rd
= inst
.operands
[0].reg
;
11712 Rn
= (inst
.operands
[1].present
11713 ? inst
.operands
[1].reg
: Rd
);
11714 Rm
= inst
.operands
[2].reg
;
11716 reject_bad_reg (Rd
);
11717 reject_bad_reg (Rn
);
11718 reject_bad_reg (Rm
);
11720 inst
.instruction
|= Rd
<< 8;
11721 inst
.instruction
|= Rn
<< 16;
11722 inst
.instruction
|= Rm
;
11728 if (unified_syntax
&& inst
.size_req
== 4)
11729 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11731 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11737 unsigned int cond
= inst
.operands
[0].imm
;
11739 set_pred_insn_type (IT_INSN
);
11740 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
11741 now_pred
.cc
= cond
;
11742 now_pred
.warn_deprecated
= FALSE
;
11743 now_pred
.type
= SCALAR_PRED
;
11745 /* If the condition is a negative condition, invert the mask. */
11746 if ((cond
& 0x1) == 0x0)
11748 unsigned int mask
= inst
.instruction
& 0x000f;
11750 if ((mask
& 0x7) == 0)
11752 /* No conversion needed. */
11753 now_pred
.block_length
= 1;
11755 else if ((mask
& 0x3) == 0)
11758 now_pred
.block_length
= 2;
11760 else if ((mask
& 0x1) == 0)
11763 now_pred
.block_length
= 3;
11768 now_pred
.block_length
= 4;
11771 inst
.instruction
&= 0xfff0;
11772 inst
.instruction
|= mask
;
11775 inst
.instruction
|= cond
<< 4;
11781 /* We are dealing with a vector predicated block. */
11782 set_pred_insn_type (VPT_INSN
);
11784 now_pred
.mask
= ((inst
.instruction
& 0x00400000) >> 19)
11785 | ((inst
.instruction
& 0xe000) >> 13);
11786 now_pred
.warn_deprecated
= FALSE
;
11787 now_pred
.type
= VECTOR_PRED
;
11790 /* Helper function used for both push/pop and ldm/stm. */
11792 encode_thumb2_multi (bfd_boolean do_io
, int base
, unsigned mask
,
11793 bfd_boolean writeback
)
11795 bfd_boolean load
, store
;
11797 gas_assert (base
!= -1 || !do_io
);
11798 load
= do_io
&& ((inst
.instruction
& (1 << 20)) != 0);
11799 store
= do_io
&& !load
;
11801 if (mask
& (1 << 13))
11802 inst
.error
= _("SP not allowed in register list");
11804 if (do_io
&& (mask
& (1 << base
)) != 0
11806 inst
.error
= _("having the base register in the register list when "
11807 "using write back is UNPREDICTABLE");
11811 if (mask
& (1 << 15))
11813 if (mask
& (1 << 14))
11814 inst
.error
= _("LR and PC should not both be in register list");
11816 set_pred_insn_type_last ();
11821 if (mask
& (1 << 15))
11822 inst
.error
= _("PC not allowed in register list");
11825 if (do_io
&& ((mask
& (mask
- 1)) == 0))
11827 /* Single register transfers implemented as str/ldr. */
11830 if (inst
.instruction
& (1 << 23))
11831 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11833 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11837 if (inst
.instruction
& (1 << 23))
11838 inst
.instruction
= 0x00800000; /* ia -> [base] */
11840 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11843 inst
.instruction
|= 0xf8400000;
11845 inst
.instruction
|= 0x00100000;
11847 mask
= ffs (mask
) - 1;
11850 else if (writeback
)
11851 inst
.instruction
|= WRITE_BACK
;
11853 inst
.instruction
|= mask
;
11855 inst
.instruction
|= base
<< 16;
11861 /* This really doesn't seem worth it. */
11862 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
11863 _("expression too complex"));
11864 constraint (inst
.operands
[1].writeback
,
11865 _("Thumb load/store multiple does not support {reglist}^"));
11867 if (unified_syntax
)
11869 bfd_boolean narrow
;
11873 /* See if we can use a 16-bit instruction. */
11874 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11875 && inst
.size_req
!= 4
11876 && !(inst
.operands
[1].imm
& ~0xff))
11878 mask
= 1 << inst
.operands
[0].reg
;
11880 if (inst
.operands
[0].reg
<= 7)
11882 if (inst
.instruction
== T_MNEM_stmia
11883 ? inst
.operands
[0].writeback
11884 : (inst
.operands
[0].writeback
11885 == !(inst
.operands
[1].imm
& mask
)))
11887 if (inst
.instruction
== T_MNEM_stmia
11888 && (inst
.operands
[1].imm
& mask
)
11889 && (inst
.operands
[1].imm
& (mask
- 1)))
11890 as_warn (_("value stored for r%d is UNKNOWN"),
11891 inst
.operands
[0].reg
);
11893 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11894 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11895 inst
.instruction
|= inst
.operands
[1].imm
;
11898 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11900 /* This means 1 register in reg list one of 3 situations:
11901 1. Instruction is stmia, but without writeback.
11902 2. lmdia without writeback, but with Rn not in
11904 3. ldmia with writeback, but with Rn in reglist.
11905 Case 3 is UNPREDICTABLE behaviour, so we handle
11906 case 1 and 2 which can be converted into a 16-bit
11907 str or ldr. The SP cases are handled below. */
11908 unsigned long opcode
;
11909 /* First, record an error for Case 3. */
11910 if (inst
.operands
[1].imm
& mask
11911 && inst
.operands
[0].writeback
)
11913 _("having the base register in the register list when "
11914 "using write back is UNPREDICTABLE");
11916 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11918 inst
.instruction
= THUMB_OP16 (opcode
);
11919 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11920 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11924 else if (inst
.operands
[0] .reg
== REG_SP
)
11926 if (inst
.operands
[0].writeback
)
11929 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11930 ? T_MNEM_push
: T_MNEM_pop
);
11931 inst
.instruction
|= inst
.operands
[1].imm
;
11934 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11937 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11938 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11939 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11947 if (inst
.instruction
< 0xffff)
11948 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11950 encode_thumb2_multi (TRUE
/* do_io */, inst
.operands
[0].reg
,
11951 inst
.operands
[1].imm
,
11952 inst
.operands
[0].writeback
);
11957 constraint (inst
.operands
[0].reg
> 7
11958 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11959 constraint (inst
.instruction
!= T_MNEM_ldmia
11960 && inst
.instruction
!= T_MNEM_stmia
,
11961 _("Thumb-2 instruction only valid in unified syntax"));
11962 if (inst
.instruction
== T_MNEM_stmia
)
11964 if (!inst
.operands
[0].writeback
)
11965 as_warn (_("this instruction will write back the base register"));
11966 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11967 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11968 as_warn (_("value stored for r%d is UNKNOWN"),
11969 inst
.operands
[0].reg
);
11973 if (!inst
.operands
[0].writeback
11974 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11975 as_warn (_("this instruction will write back the base register"));
11976 else if (inst
.operands
[0].writeback
11977 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11978 as_warn (_("this instruction will not write back the base register"));
11981 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11982 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11983 inst
.instruction
|= inst
.operands
[1].imm
;
11990 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11991 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11992 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11993 || inst
.operands
[1].negative
,
11996 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11998 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11999 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12000 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
12006 if (!inst
.operands
[1].present
)
12008 constraint (inst
.operands
[0].reg
== REG_LR
,
12009 _("r14 not allowed as first register "
12010 "when second register is omitted"));
12011 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12013 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
12016 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12017 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12018 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12024 unsigned long opcode
;
12027 if (inst
.operands
[0].isreg
12028 && !inst
.operands
[0].preind
12029 && inst
.operands
[0].reg
== REG_PC
)
12030 set_pred_insn_type_last ();
12032 opcode
= inst
.instruction
;
12033 if (unified_syntax
)
12035 if (!inst
.operands
[1].isreg
)
12037 if (opcode
<= 0xffff)
12038 inst
.instruction
= THUMB_OP32 (opcode
);
12039 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12042 if (inst
.operands
[1].isreg
12043 && !inst
.operands
[1].writeback
12044 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
12045 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
12046 && opcode
<= 0xffff
12047 && inst
.size_req
!= 4)
12049 /* Insn may have a 16-bit form. */
12050 Rn
= inst
.operands
[1].reg
;
12051 if (inst
.operands
[1].immisreg
)
12053 inst
.instruction
= THUMB_OP16 (opcode
);
12055 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
12057 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
12058 reject_bad_reg (inst
.operands
[1].imm
);
12060 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
12061 && opcode
!= T_MNEM_ldrsb
)
12062 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
12063 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
12070 if (inst
.relocs
[0].pc_rel
)
12071 opcode
= T_MNEM_ldr_pc2
;
12073 opcode
= T_MNEM_ldr_pc
;
12077 if (opcode
== T_MNEM_ldr
)
12078 opcode
= T_MNEM_ldr_sp
;
12080 opcode
= T_MNEM_str_sp
;
12082 inst
.instruction
= inst
.operands
[0].reg
<< 8;
12086 inst
.instruction
= inst
.operands
[0].reg
;
12087 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12089 inst
.instruction
|= THUMB_OP16 (opcode
);
12090 if (inst
.size_req
== 2)
12091 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12093 inst
.relax
= opcode
;
12097 /* Definitely a 32-bit variant. */
12099 /* Warning for Erratum 752419. */
12100 if (opcode
== T_MNEM_ldr
12101 && inst
.operands
[0].reg
== REG_SP
12102 && inst
.operands
[1].writeback
== 1
12103 && !inst
.operands
[1].immisreg
)
12105 if (no_cpu_selected ()
12106 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
12107 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
12108 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
12109 as_warn (_("This instruction may be unpredictable "
12110 "if executed on M-profile cores "
12111 "with interrupts enabled."));
12114 /* Do some validations regarding addressing modes. */
12115 if (inst
.operands
[1].immisreg
)
12116 reject_bad_reg (inst
.operands
[1].imm
);
12118 constraint (inst
.operands
[1].writeback
== 1
12119 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
12122 inst
.instruction
= THUMB_OP32 (opcode
);
12123 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12124 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12125 check_ldr_r15_aligned ();
12129 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
12131 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
12133 /* Only [Rn,Rm] is acceptable. */
12134 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
12135 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
12136 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
12137 || inst
.operands
[1].negative
,
12138 _("Thumb does not support this addressing mode"));
12139 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12143 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12144 if (!inst
.operands
[1].isreg
)
12145 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12148 constraint (!inst
.operands
[1].preind
12149 || inst
.operands
[1].shifted
12150 || inst
.operands
[1].writeback
,
12151 _("Thumb does not support this addressing mode"));
12152 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
12154 constraint (inst
.instruction
& 0x0600,
12155 _("byte or halfword not valid for base register"));
12156 constraint (inst
.operands
[1].reg
== REG_PC
12157 && !(inst
.instruction
& THUMB_LOAD_BIT
),
12158 _("r15 based store not allowed"));
12159 constraint (inst
.operands
[1].immisreg
,
12160 _("invalid base register for register offset"));
12162 if (inst
.operands
[1].reg
== REG_PC
)
12163 inst
.instruction
= T_OPCODE_LDR_PC
;
12164 else if (inst
.instruction
& THUMB_LOAD_BIT
)
12165 inst
.instruction
= T_OPCODE_LDR_SP
;
12167 inst
.instruction
= T_OPCODE_STR_SP
;
12169 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12170 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12174 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
12175 if (!inst
.operands
[1].immisreg
)
12177 /* Immediate offset. */
12178 inst
.instruction
|= inst
.operands
[0].reg
;
12179 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12180 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12184 /* Register offset. */
12185 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
12186 constraint (inst
.operands
[1].negative
,
12187 _("Thumb does not support this addressing mode"));
12190 switch (inst
.instruction
)
12192 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
12193 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
12194 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
12195 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
12196 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
12197 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
12198 case 0x5600 /* ldrsb */:
12199 case 0x5e00 /* ldrsh */: break;
12203 inst
.instruction
|= inst
.operands
[0].reg
;
12204 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12205 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
12211 if (!inst
.operands
[1].present
)
12213 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12214 constraint (inst
.operands
[0].reg
== REG_LR
,
12215 _("r14 not allowed here"));
12216 constraint (inst
.operands
[0].reg
== REG_R12
,
12217 _("r12 not allowed here"));
12220 if (inst
.operands
[2].writeback
12221 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
12222 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
12223 as_warn (_("base register written back, and overlaps "
12224 "one of transfer registers"));
12226 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12227 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12228 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
12234 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12235 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
12241 unsigned Rd
, Rn
, Rm
, Ra
;
12243 Rd
= inst
.operands
[0].reg
;
12244 Rn
= inst
.operands
[1].reg
;
12245 Rm
= inst
.operands
[2].reg
;
12246 Ra
= inst
.operands
[3].reg
;
12248 reject_bad_reg (Rd
);
12249 reject_bad_reg (Rn
);
12250 reject_bad_reg (Rm
);
12251 reject_bad_reg (Ra
);
12253 inst
.instruction
|= Rd
<< 8;
12254 inst
.instruction
|= Rn
<< 16;
12255 inst
.instruction
|= Rm
;
12256 inst
.instruction
|= Ra
<< 12;
12262 unsigned RdLo
, RdHi
, Rn
, Rm
;
12264 RdLo
= inst
.operands
[0].reg
;
12265 RdHi
= inst
.operands
[1].reg
;
12266 Rn
= inst
.operands
[2].reg
;
12267 Rm
= inst
.operands
[3].reg
;
12269 reject_bad_reg (RdLo
);
12270 reject_bad_reg (RdHi
);
12271 reject_bad_reg (Rn
);
12272 reject_bad_reg (Rm
);
12274 inst
.instruction
|= RdLo
<< 12;
12275 inst
.instruction
|= RdHi
<< 8;
12276 inst
.instruction
|= Rn
<< 16;
12277 inst
.instruction
|= Rm
;
12281 do_t_mov_cmp (void)
12285 Rn
= inst
.operands
[0].reg
;
12286 Rm
= inst
.operands
[1].reg
;
12289 set_pred_insn_type_last ();
12291 if (unified_syntax
)
12293 int r0off
= (inst
.instruction
== T_MNEM_mov
12294 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
12295 unsigned long opcode
;
12296 bfd_boolean narrow
;
12297 bfd_boolean low_regs
;
12299 low_regs
= (Rn
<= 7 && Rm
<= 7);
12300 opcode
= inst
.instruction
;
12301 if (in_pred_block ())
12302 narrow
= opcode
!= T_MNEM_movs
;
12304 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
12305 if (inst
.size_req
== 4
12306 || inst
.operands
[1].shifted
)
12309 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12310 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
12311 && !inst
.operands
[1].shifted
12315 inst
.instruction
= T2_SUBS_PC_LR
;
12319 if (opcode
== T_MNEM_cmp
)
12321 constraint (Rn
== REG_PC
, BAD_PC
);
12324 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12326 warn_deprecated_sp (Rm
);
12327 /* R15 was documented as a valid choice for Rm in ARMv6,
12328 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12329 tools reject R15, so we do too. */
12330 constraint (Rm
== REG_PC
, BAD_PC
);
12333 reject_bad_reg (Rm
);
12335 else if (opcode
== T_MNEM_mov
12336 || opcode
== T_MNEM_movs
)
12338 if (inst
.operands
[1].isreg
)
12340 if (opcode
== T_MNEM_movs
)
12342 reject_bad_reg (Rn
);
12343 reject_bad_reg (Rm
);
12347 /* This is mov.n. */
12348 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
12349 && (Rm
== REG_SP
|| Rm
== REG_PC
))
12351 as_tsktsk (_("Use of r%u as a source register is "
12352 "deprecated when r%u is the destination "
12353 "register."), Rm
, Rn
);
12358 /* This is mov.w. */
12359 constraint (Rn
== REG_PC
, BAD_PC
);
12360 constraint (Rm
== REG_PC
, BAD_PC
);
12361 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12362 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
12366 reject_bad_reg (Rn
);
12369 if (!inst
.operands
[1].isreg
)
12371 /* Immediate operand. */
12372 if (!in_pred_block () && opcode
== T_MNEM_mov
)
12374 if (low_regs
&& narrow
)
12376 inst
.instruction
= THUMB_OP16 (opcode
);
12377 inst
.instruction
|= Rn
<< 8;
12378 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12379 || inst
.relocs
[0].type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
12381 if (inst
.size_req
== 2)
12382 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12384 inst
.relax
= opcode
;
12389 constraint ((inst
.relocs
[0].type
12390 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
12391 && (inst
.relocs
[0].type
12392 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
12393 THUMB1_RELOC_ONLY
);
12395 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12396 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12397 inst
.instruction
|= Rn
<< r0off
;
12398 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12401 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
12402 && (inst
.instruction
== T_MNEM_mov
12403 || inst
.instruction
== T_MNEM_movs
))
12405 /* Register shifts are encoded as separate shift instructions. */
12406 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
12408 if (in_pred_block ())
12413 if (inst
.size_req
== 4)
12416 if (!low_regs
|| inst
.operands
[1].imm
> 7)
12422 switch (inst
.operands
[1].shift_kind
)
12425 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
12428 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
12431 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
12434 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
12440 inst
.instruction
= opcode
;
12443 inst
.instruction
|= Rn
;
12444 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
12449 inst
.instruction
|= CONDS_BIT
;
12451 inst
.instruction
|= Rn
<< 8;
12452 inst
.instruction
|= Rm
<< 16;
12453 inst
.instruction
|= inst
.operands
[1].imm
;
12458 /* Some mov with immediate shift have narrow variants.
12459 Register shifts are handled above. */
12460 if (low_regs
&& inst
.operands
[1].shifted
12461 && (inst
.instruction
== T_MNEM_mov
12462 || inst
.instruction
== T_MNEM_movs
))
12464 if (in_pred_block ())
12465 narrow
= (inst
.instruction
== T_MNEM_mov
);
12467 narrow
= (inst
.instruction
== T_MNEM_movs
);
12472 switch (inst
.operands
[1].shift_kind
)
12474 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12475 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12476 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12477 default: narrow
= FALSE
; break;
12483 inst
.instruction
|= Rn
;
12484 inst
.instruction
|= Rm
<< 3;
12485 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12489 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12490 inst
.instruction
|= Rn
<< r0off
;
12491 encode_thumb32_shifted_operand (1);
12495 switch (inst
.instruction
)
12498 /* In v4t or v5t a move of two lowregs produces unpredictable
12499 results. Don't allow this. */
12502 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12503 "MOV Rd, Rs with two low registers is not "
12504 "permitted on this architecture");
12505 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12509 inst
.instruction
= T_OPCODE_MOV_HR
;
12510 inst
.instruction
|= (Rn
& 0x8) << 4;
12511 inst
.instruction
|= (Rn
& 0x7);
12512 inst
.instruction
|= Rm
<< 3;
12516 /* We know we have low registers at this point.
12517 Generate LSLS Rd, Rs, #0. */
12518 inst
.instruction
= T_OPCODE_LSL_I
;
12519 inst
.instruction
|= Rn
;
12520 inst
.instruction
|= Rm
<< 3;
12526 inst
.instruction
= T_OPCODE_CMP_LR
;
12527 inst
.instruction
|= Rn
;
12528 inst
.instruction
|= Rm
<< 3;
12532 inst
.instruction
= T_OPCODE_CMP_HR
;
12533 inst
.instruction
|= (Rn
& 0x8) << 4;
12534 inst
.instruction
|= (Rn
& 0x7);
12535 inst
.instruction
|= Rm
<< 3;
12542 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12544 /* PR 10443: Do not silently ignore shifted operands. */
12545 constraint (inst
.operands
[1].shifted
,
12546 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12548 if (inst
.operands
[1].isreg
)
12550 if (Rn
< 8 && Rm
< 8)
12552 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12553 since a MOV instruction produces unpredictable results. */
12554 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12555 inst
.instruction
= T_OPCODE_ADD_I3
;
12557 inst
.instruction
= T_OPCODE_CMP_LR
;
12559 inst
.instruction
|= Rn
;
12560 inst
.instruction
|= Rm
<< 3;
12564 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12565 inst
.instruction
= T_OPCODE_MOV_HR
;
12567 inst
.instruction
= T_OPCODE_CMP_HR
;
12573 constraint (Rn
> 7,
12574 _("only lo regs allowed with immediate"));
12575 inst
.instruction
|= Rn
<< 8;
12576 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12587 top
= (inst
.instruction
& 0x00800000) != 0;
12588 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
)
12590 constraint (top
, _(":lower16: not allowed in this instruction"));
12591 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVW
;
12593 else if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
)
12595 constraint (!top
, _(":upper16: not allowed in this instruction"));
12596 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVT
;
12599 Rd
= inst
.operands
[0].reg
;
12600 reject_bad_reg (Rd
);
12602 inst
.instruction
|= Rd
<< 8;
12603 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
12605 imm
= inst
.relocs
[0].exp
.X_add_number
;
12606 inst
.instruction
|= (imm
& 0xf000) << 4;
12607 inst
.instruction
|= (imm
& 0x0800) << 15;
12608 inst
.instruction
|= (imm
& 0x0700) << 4;
12609 inst
.instruction
|= (imm
& 0x00ff);
12614 do_t_mvn_tst (void)
12618 Rn
= inst
.operands
[0].reg
;
12619 Rm
= inst
.operands
[1].reg
;
12621 if (inst
.instruction
== T_MNEM_cmp
12622 || inst
.instruction
== T_MNEM_cmn
)
12623 constraint (Rn
== REG_PC
, BAD_PC
);
12625 reject_bad_reg (Rn
);
12626 reject_bad_reg (Rm
);
12628 if (unified_syntax
)
12630 int r0off
= (inst
.instruction
== T_MNEM_mvn
12631 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12632 bfd_boolean narrow
;
12634 if (inst
.size_req
== 4
12635 || inst
.instruction
> 0xffff
12636 || inst
.operands
[1].shifted
12637 || Rn
> 7 || Rm
> 7)
12639 else if (inst
.instruction
== T_MNEM_cmn
12640 || inst
.instruction
== T_MNEM_tst
)
12642 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12643 narrow
= !in_pred_block ();
12645 narrow
= in_pred_block ();
12647 if (!inst
.operands
[1].isreg
)
12649 /* For an immediate, we always generate a 32-bit opcode;
12650 section relaxation will shrink it later if possible. */
12651 if (inst
.instruction
< 0xffff)
12652 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12653 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12654 inst
.instruction
|= Rn
<< r0off
;
12655 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12659 /* See if we can do this with a 16-bit instruction. */
12662 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12663 inst
.instruction
|= Rn
;
12664 inst
.instruction
|= Rm
<< 3;
12668 constraint (inst
.operands
[1].shifted
12669 && inst
.operands
[1].immisreg
,
12670 _("shift must be constant"));
12671 if (inst
.instruction
< 0xffff)
12672 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12673 inst
.instruction
|= Rn
<< r0off
;
12674 encode_thumb32_shifted_operand (1);
12680 constraint (inst
.instruction
> 0xffff
12681 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12682 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12683 _("unshifted register required"));
12684 constraint (Rn
> 7 || Rm
> 7,
12687 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12688 inst
.instruction
|= Rn
;
12689 inst
.instruction
|= Rm
<< 3;
12698 if (do_vfp_nsyn_mrs () == SUCCESS
)
12701 Rd
= inst
.operands
[0].reg
;
12702 reject_bad_reg (Rd
);
12703 inst
.instruction
|= Rd
<< 8;
12705 if (inst
.operands
[1].isreg
)
12707 unsigned br
= inst
.operands
[1].reg
;
12708 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12709 as_bad (_("bad register for mrs"));
12711 inst
.instruction
|= br
& (0xf << 16);
12712 inst
.instruction
|= (br
& 0x300) >> 4;
12713 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12717 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12719 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12721 /* PR gas/12698: The constraint is only applied for m_profile.
12722 If the user has specified -march=all, we want to ignore it as
12723 we are building for any CPU type, including non-m variants. */
12724 bfd_boolean m_profile
=
12725 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12726 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12727 "not support requested special purpose register"));
12730 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12732 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12733 _("'APSR', 'CPSR' or 'SPSR' expected"));
12735 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12736 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12737 inst
.instruction
|= 0xf0000;
12747 if (do_vfp_nsyn_msr () == SUCCESS
)
12750 constraint (!inst
.operands
[1].isreg
,
12751 _("Thumb encoding does not support an immediate here"));
12753 if (inst
.operands
[0].isreg
)
12754 flags
= (int)(inst
.operands
[0].reg
);
12756 flags
= inst
.operands
[0].imm
;
12758 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12760 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12762 /* PR gas/12698: The constraint is only applied for m_profile.
12763 If the user has specified -march=all, we want to ignore it as
12764 we are building for any CPU type, including non-m variants. */
12765 bfd_boolean m_profile
=
12766 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12767 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12768 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12769 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12770 && bits
!= PSR_f
)) && m_profile
,
12771 _("selected processor does not support requested special "
12772 "purpose register"));
12775 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12776 "requested special purpose register"));
12778 Rn
= inst
.operands
[1].reg
;
12779 reject_bad_reg (Rn
);
12781 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12782 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12783 inst
.instruction
|= (flags
& 0x300) >> 4;
12784 inst
.instruction
|= (flags
& 0xff);
12785 inst
.instruction
|= Rn
<< 16;
12791 bfd_boolean narrow
;
12792 unsigned Rd
, Rn
, Rm
;
12794 if (!inst
.operands
[2].present
)
12795 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12797 Rd
= inst
.operands
[0].reg
;
12798 Rn
= inst
.operands
[1].reg
;
12799 Rm
= inst
.operands
[2].reg
;
12801 if (unified_syntax
)
12803 if (inst
.size_req
== 4
12809 else if (inst
.instruction
== T_MNEM_muls
)
12810 narrow
= !in_pred_block ();
12812 narrow
= in_pred_block ();
12816 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12817 constraint (Rn
> 7 || Rm
> 7,
12824 /* 16-bit MULS/Conditional MUL. */
12825 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12826 inst
.instruction
|= Rd
;
12829 inst
.instruction
|= Rm
<< 3;
12831 inst
.instruction
|= Rn
<< 3;
12833 constraint (1, _("dest must overlap one source register"));
12837 constraint (inst
.instruction
!= T_MNEM_mul
,
12838 _("Thumb-2 MUL must not set flags"));
12840 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12841 inst
.instruction
|= Rd
<< 8;
12842 inst
.instruction
|= Rn
<< 16;
12843 inst
.instruction
|= Rm
<< 0;
12845 reject_bad_reg (Rd
);
12846 reject_bad_reg (Rn
);
12847 reject_bad_reg (Rm
);
12854 unsigned RdLo
, RdHi
, Rn
, Rm
;
12856 RdLo
= inst
.operands
[0].reg
;
12857 RdHi
= inst
.operands
[1].reg
;
12858 Rn
= inst
.operands
[2].reg
;
12859 Rm
= inst
.operands
[3].reg
;
12861 reject_bad_reg (RdLo
);
12862 reject_bad_reg (RdHi
);
12863 reject_bad_reg (Rn
);
12864 reject_bad_reg (Rm
);
12866 inst
.instruction
|= RdLo
<< 12;
12867 inst
.instruction
|= RdHi
<< 8;
12868 inst
.instruction
|= Rn
<< 16;
12869 inst
.instruction
|= Rm
;
12872 as_tsktsk (_("rdhi and rdlo must be different"));
12878 set_pred_insn_type (NEUTRAL_IT_INSN
);
12880 if (unified_syntax
)
12882 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12884 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12885 inst
.instruction
|= inst
.operands
[0].imm
;
12889 /* PR9722: Check for Thumb2 availability before
12890 generating a thumb2 nop instruction. */
12891 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12893 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12894 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12897 inst
.instruction
= 0x46c0;
12902 constraint (inst
.operands
[0].present
,
12903 _("Thumb does not support NOP with hints"));
12904 inst
.instruction
= 0x46c0;
12911 if (unified_syntax
)
12913 bfd_boolean narrow
;
12915 if (THUMB_SETS_FLAGS (inst
.instruction
))
12916 narrow
= !in_pred_block ();
12918 narrow
= in_pred_block ();
12919 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12921 if (inst
.size_req
== 4)
12926 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12927 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12928 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12932 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12933 inst
.instruction
|= inst
.operands
[0].reg
;
12934 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12939 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12941 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12943 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12944 inst
.instruction
|= inst
.operands
[0].reg
;
12945 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12954 Rd
= inst
.operands
[0].reg
;
12955 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12957 reject_bad_reg (Rd
);
12958 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12959 reject_bad_reg (Rn
);
12961 inst
.instruction
|= Rd
<< 8;
12962 inst
.instruction
|= Rn
<< 16;
12964 if (!inst
.operands
[2].isreg
)
12966 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12967 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12973 Rm
= inst
.operands
[2].reg
;
12974 reject_bad_reg (Rm
);
12976 constraint (inst
.operands
[2].shifted
12977 && inst
.operands
[2].immisreg
,
12978 _("shift must be constant"));
12979 encode_thumb32_shifted_operand (2);
12986 unsigned Rd
, Rn
, Rm
;
12988 Rd
= inst
.operands
[0].reg
;
12989 Rn
= inst
.operands
[1].reg
;
12990 Rm
= inst
.operands
[2].reg
;
12992 reject_bad_reg (Rd
);
12993 reject_bad_reg (Rn
);
12994 reject_bad_reg (Rm
);
12996 inst
.instruction
|= Rd
<< 8;
12997 inst
.instruction
|= Rn
<< 16;
12998 inst
.instruction
|= Rm
;
12999 if (inst
.operands
[3].present
)
13001 unsigned int val
= inst
.relocs
[0].exp
.X_add_number
;
13002 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13003 _("expression too complex"));
13004 inst
.instruction
|= (val
& 0x1c) << 10;
13005 inst
.instruction
|= (val
& 0x03) << 6;
13012 if (!inst
.operands
[3].present
)
13016 inst
.instruction
&= ~0x00000020;
13018 /* PR 10168. Swap the Rm and Rn registers. */
13019 Rtmp
= inst
.operands
[1].reg
;
13020 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
13021 inst
.operands
[2].reg
= Rtmp
;
13029 if (inst
.operands
[0].immisreg
)
13030 reject_bad_reg (inst
.operands
[0].imm
);
13032 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
13036 do_t_push_pop (void)
13040 constraint (inst
.operands
[0].writeback
,
13041 _("push/pop do not support {reglist}^"));
13042 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
13043 _("expression too complex"));
13045 mask
= inst
.operands
[0].imm
;
13046 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
13047 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
13048 else if (inst
.size_req
!= 4
13049 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
13050 ? REG_LR
: REG_PC
)))
13052 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13053 inst
.instruction
|= THUMB_PP_PC_LR
;
13054 inst
.instruction
|= mask
& 0xff;
13056 else if (unified_syntax
)
13058 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13059 encode_thumb2_multi (TRUE
/* do_io */, 13, mask
, TRUE
);
13063 inst
.error
= _("invalid register list to push/pop instruction");
13071 if (unified_syntax
)
13072 encode_thumb2_multi (FALSE
/* do_io */, -1, inst
.operands
[0].imm
, FALSE
);
13075 inst
.error
= _("invalid register list to push/pop instruction");
13081 do_t_vscclrm (void)
13083 if (inst
.operands
[0].issingle
)
13085 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1) << 22;
13086 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1e) << 11;
13087 inst
.instruction
|= inst
.operands
[0].imm
;
13091 inst
.instruction
|= (inst
.operands
[0].reg
& 0x10) << 18;
13092 inst
.instruction
|= (inst
.operands
[0].reg
& 0xf) << 12;
13093 inst
.instruction
|= 1 << 8;
13094 inst
.instruction
|= inst
.operands
[0].imm
<< 1;
13103 Rd
= inst
.operands
[0].reg
;
13104 Rm
= inst
.operands
[1].reg
;
13106 reject_bad_reg (Rd
);
13107 reject_bad_reg (Rm
);
13109 inst
.instruction
|= Rd
<< 8;
13110 inst
.instruction
|= Rm
<< 16;
13111 inst
.instruction
|= Rm
;
13119 Rd
= inst
.operands
[0].reg
;
13120 Rm
= inst
.operands
[1].reg
;
13122 reject_bad_reg (Rd
);
13123 reject_bad_reg (Rm
);
13125 if (Rd
<= 7 && Rm
<= 7
13126 && inst
.size_req
!= 4)
13128 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13129 inst
.instruction
|= Rd
;
13130 inst
.instruction
|= Rm
<< 3;
13132 else if (unified_syntax
)
13134 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13135 inst
.instruction
|= Rd
<< 8;
13136 inst
.instruction
|= Rm
<< 16;
13137 inst
.instruction
|= Rm
;
13140 inst
.error
= BAD_HIREG
;
13148 Rd
= inst
.operands
[0].reg
;
13149 Rm
= inst
.operands
[1].reg
;
13151 reject_bad_reg (Rd
);
13152 reject_bad_reg (Rm
);
13154 inst
.instruction
|= Rd
<< 8;
13155 inst
.instruction
|= Rm
;
13163 Rd
= inst
.operands
[0].reg
;
13164 Rs
= (inst
.operands
[1].present
13165 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
13166 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
13168 reject_bad_reg (Rd
);
13169 reject_bad_reg (Rs
);
13170 if (inst
.operands
[2].isreg
)
13171 reject_bad_reg (inst
.operands
[2].reg
);
13173 inst
.instruction
|= Rd
<< 8;
13174 inst
.instruction
|= Rs
<< 16;
13175 if (!inst
.operands
[2].isreg
)
13177 bfd_boolean narrow
;
13179 if ((inst
.instruction
& 0x00100000) != 0)
13180 narrow
= !in_pred_block ();
13182 narrow
= in_pred_block ();
13184 if (Rd
> 7 || Rs
> 7)
13187 if (inst
.size_req
== 4 || !unified_syntax
)
13190 if (inst
.relocs
[0].exp
.X_op
!= O_constant
13191 || inst
.relocs
[0].exp
.X_add_number
!= 0)
13194 /* Turn rsb #0 into 16-bit neg. We should probably do this via
13195 relaxation, but it doesn't seem worth the hassle. */
13198 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13199 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
13200 inst
.instruction
|= Rs
<< 3;
13201 inst
.instruction
|= Rd
;
13205 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13206 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13210 encode_thumb32_shifted_operand (2);
13216 if (warn_on_deprecated
13217 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13218 as_tsktsk (_("setend use is deprecated for ARMv8"));
13220 set_pred_insn_type (OUTSIDE_PRED_INSN
);
13221 if (inst
.operands
[0].imm
)
13222 inst
.instruction
|= 0x8;
13228 if (!inst
.operands
[1].present
)
13229 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
13231 if (unified_syntax
)
13233 bfd_boolean narrow
;
13236 switch (inst
.instruction
)
13239 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
13241 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
13243 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
13245 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
13249 if (THUMB_SETS_FLAGS (inst
.instruction
))
13250 narrow
= !in_pred_block ();
13252 narrow
= in_pred_block ();
13253 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13255 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
13257 if (inst
.operands
[2].isreg
13258 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
13259 || inst
.operands
[2].reg
> 7))
13261 if (inst
.size_req
== 4)
13264 reject_bad_reg (inst
.operands
[0].reg
);
13265 reject_bad_reg (inst
.operands
[1].reg
);
13269 if (inst
.operands
[2].isreg
)
13271 reject_bad_reg (inst
.operands
[2].reg
);
13272 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13273 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13274 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13275 inst
.instruction
|= inst
.operands
[2].reg
;
13277 /* PR 12854: Error on extraneous shifts. */
13278 constraint (inst
.operands
[2].shifted
,
13279 _("extraneous shift as part of operand to shift insn"));
13283 inst
.operands
[1].shifted
= 1;
13284 inst
.operands
[1].shift_kind
= shift_kind
;
13285 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
13286 ? T_MNEM_movs
: T_MNEM_mov
);
13287 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13288 encode_thumb32_shifted_operand (1);
13289 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
13290 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13295 if (inst
.operands
[2].isreg
)
13297 switch (shift_kind
)
13299 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13300 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13301 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13302 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13306 inst
.instruction
|= inst
.operands
[0].reg
;
13307 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13309 /* PR 12854: Error on extraneous shifts. */
13310 constraint (inst
.operands
[2].shifted
,
13311 _("extraneous shift as part of operand to shift insn"));
13315 switch (shift_kind
)
13317 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13318 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13319 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13322 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13323 inst
.instruction
|= inst
.operands
[0].reg
;
13324 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13330 constraint (inst
.operands
[0].reg
> 7
13331 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
13332 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13334 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
13336 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
13337 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13338 _("source1 and dest must be same register"));
13340 switch (inst
.instruction
)
13342 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13343 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13344 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13345 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13349 inst
.instruction
|= inst
.operands
[0].reg
;
13350 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13352 /* PR 12854: Error on extraneous shifts. */
13353 constraint (inst
.operands
[2].shifted
,
13354 _("extraneous shift as part of operand to shift insn"));
13358 switch (inst
.instruction
)
13360 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13361 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13362 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13363 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
13366 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13367 inst
.instruction
|= inst
.operands
[0].reg
;
13368 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13376 unsigned Rd
, Rn
, Rm
;
13378 Rd
= inst
.operands
[0].reg
;
13379 Rn
= inst
.operands
[1].reg
;
13380 Rm
= inst
.operands
[2].reg
;
13382 reject_bad_reg (Rd
);
13383 reject_bad_reg (Rn
);
13384 reject_bad_reg (Rm
);
13386 inst
.instruction
|= Rd
<< 8;
13387 inst
.instruction
|= Rn
<< 16;
13388 inst
.instruction
|= Rm
;
13394 unsigned Rd
, Rn
, Rm
;
13396 Rd
= inst
.operands
[0].reg
;
13397 Rm
= inst
.operands
[1].reg
;
13398 Rn
= inst
.operands
[2].reg
;
13400 reject_bad_reg (Rd
);
13401 reject_bad_reg (Rn
);
13402 reject_bad_reg (Rm
);
13404 inst
.instruction
|= Rd
<< 8;
13405 inst
.instruction
|= Rn
<< 16;
13406 inst
.instruction
|= Rm
;
13412 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13413 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
13414 _("SMC is not permitted on this architecture"));
13415 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13416 _("expression too complex"));
13417 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13418 inst
.instruction
|= (value
& 0xf000) >> 12;
13419 inst
.instruction
|= (value
& 0x0ff0);
13420 inst
.instruction
|= (value
& 0x000f) << 16;
13421 /* PR gas/15623: SMC instructions must be last in an IT block. */
13422 set_pred_insn_type_last ();
13428 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13430 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13431 inst
.instruction
|= (value
& 0x0fff);
13432 inst
.instruction
|= (value
& 0xf000) << 4;
13436 do_t_ssat_usat (int bias
)
13440 Rd
= inst
.operands
[0].reg
;
13441 Rn
= inst
.operands
[2].reg
;
13443 reject_bad_reg (Rd
);
13444 reject_bad_reg (Rn
);
13446 inst
.instruction
|= Rd
<< 8;
13447 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
13448 inst
.instruction
|= Rn
<< 16;
13450 if (inst
.operands
[3].present
)
13452 offsetT shift_amount
= inst
.relocs
[0].exp
.X_add_number
;
13454 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13456 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13457 _("expression too complex"));
13459 if (shift_amount
!= 0)
13461 constraint (shift_amount
> 31,
13462 _("shift expression is too large"));
13464 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
13465 inst
.instruction
|= 0x00200000; /* sh bit. */
13467 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
13468 inst
.instruction
|= (shift_amount
& 0x03) << 6;
13476 do_t_ssat_usat (1);
13484 Rd
= inst
.operands
[0].reg
;
13485 Rn
= inst
.operands
[2].reg
;
13487 reject_bad_reg (Rd
);
13488 reject_bad_reg (Rn
);
13490 inst
.instruction
|= Rd
<< 8;
13491 inst
.instruction
|= inst
.operands
[1].imm
- 1;
13492 inst
.instruction
|= Rn
<< 16;
13498 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
13499 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
13500 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
13501 || inst
.operands
[2].negative
,
13504 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13506 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13507 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13508 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13509 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13515 if (!inst
.operands
[2].present
)
13516 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13518 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13519 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13520 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13523 inst
.instruction
|= inst
.operands
[0].reg
;
13524 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13525 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13526 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13532 unsigned Rd
, Rn
, Rm
;
13534 Rd
= inst
.operands
[0].reg
;
13535 Rn
= inst
.operands
[1].reg
;
13536 Rm
= inst
.operands
[2].reg
;
13538 reject_bad_reg (Rd
);
13539 reject_bad_reg (Rn
);
13540 reject_bad_reg (Rm
);
13542 inst
.instruction
|= Rd
<< 8;
13543 inst
.instruction
|= Rn
<< 16;
13544 inst
.instruction
|= Rm
;
13545 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13553 Rd
= inst
.operands
[0].reg
;
13554 Rm
= inst
.operands
[1].reg
;
13556 reject_bad_reg (Rd
);
13557 reject_bad_reg (Rm
);
13559 if (inst
.instruction
<= 0xffff
13560 && inst
.size_req
!= 4
13561 && Rd
<= 7 && Rm
<= 7
13562 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13564 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13565 inst
.instruction
|= Rd
;
13566 inst
.instruction
|= Rm
<< 3;
13568 else if (unified_syntax
)
13570 if (inst
.instruction
<= 0xffff)
13571 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13572 inst
.instruction
|= Rd
<< 8;
13573 inst
.instruction
|= Rm
;
13574 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13578 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13579 _("Thumb encoding does not support rotation"));
13580 constraint (1, BAD_HIREG
);
13587 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
13596 half
= (inst
.instruction
& 0x10) != 0;
13597 set_pred_insn_type_last ();
13598 constraint (inst
.operands
[0].immisreg
,
13599 _("instruction requires register index"));
13601 Rn
= inst
.operands
[0].reg
;
13602 Rm
= inst
.operands
[0].imm
;
13604 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13605 constraint (Rn
== REG_SP
, BAD_SP
);
13606 reject_bad_reg (Rm
);
13608 constraint (!half
&& inst
.operands
[0].shifted
,
13609 _("instruction does not allow shifted index"));
13610 inst
.instruction
|= (Rn
<< 16) | Rm
;
13616 if (!inst
.operands
[0].present
)
13617 inst
.operands
[0].imm
= 0;
13619 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13621 constraint (inst
.size_req
== 2,
13622 _("immediate value out of range"));
13623 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13624 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13625 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13629 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13630 inst
.instruction
|= inst
.operands
[0].imm
;
13633 set_pred_insn_type (NEUTRAL_IT_INSN
);
/* Thumb USAT: unsigned saturate, width encoded directly (bias 0).  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13648 Rd
= inst
.operands
[0].reg
;
13649 Rn
= inst
.operands
[2].reg
;
13651 reject_bad_reg (Rd
);
13652 reject_bad_reg (Rn
);
13654 inst
.instruction
|= Rd
<< 8;
13655 inst
.instruction
|= inst
.operands
[1].imm
;
13656 inst
.instruction
|= Rn
<< 16;
13659 /* Checking the range of the branch offset (VAL) with NBITS bits
13660 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13662 v8_1_branch_value_check (int val
, int nbits
, int is_signed
)
13664 gas_assert (nbits
> 0 && nbits
<= 32);
13667 int cmp
= (1 << (nbits
- 1));
13668 if ((val
< -cmp
) || (val
>= cmp
) || (val
& 0x01))
13673 if ((val
<= 0) || (val
>= (1 << nbits
)) || (val
& 0x1))
13679 /* For branches in Armv8.1-M Mainline. */
13681 do_t_branch_future (void)
13683 unsigned long insn
= inst
.instruction
;
13685 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13686 if (inst
.operands
[0].hasreloc
== 0)
13688 if (v8_1_branch_value_check (inst
.operands
[0].imm
, 5, FALSE
) == FAIL
)
13689 as_bad (BAD_BRANCH_OFF
);
13691 inst
.instruction
|= ((inst
.operands
[0].imm
& 0x1f) >> 1) << 23;
13695 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH5
;
13696 inst
.relocs
[0].pc_rel
= 1;
13702 if (inst
.operands
[1].hasreloc
== 0)
13704 int val
= inst
.operands
[1].imm
;
13705 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 17, TRUE
) == FAIL
)
13706 as_bad (BAD_BRANCH_OFF
);
13708 int immA
= (val
& 0x0001f000) >> 12;
13709 int immB
= (val
& 0x00000ffc) >> 2;
13710 int immC
= (val
& 0x00000002) >> 1;
13711 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13715 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF17
;
13716 inst
.relocs
[1].pc_rel
= 1;
13721 if (inst
.operands
[1].hasreloc
== 0)
13723 int val
= inst
.operands
[1].imm
;
13724 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 19, TRUE
) == FAIL
)
13725 as_bad (BAD_BRANCH_OFF
);
13727 int immA
= (val
& 0x0007f000) >> 12;
13728 int immB
= (val
& 0x00000ffc) >> 2;
13729 int immC
= (val
& 0x00000002) >> 1;
13730 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13734 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF19
;
13735 inst
.relocs
[1].pc_rel
= 1;
13739 case T_MNEM_bfcsel
:
13741 if (inst
.operands
[1].hasreloc
== 0)
13743 int val
= inst
.operands
[1].imm
;
13744 int immA
= (val
& 0x00001000) >> 12;
13745 int immB
= (val
& 0x00000ffc) >> 2;
13746 int immC
= (val
& 0x00000002) >> 1;
13747 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13751 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF13
;
13752 inst
.relocs
[1].pc_rel
= 1;
13756 if (inst
.operands
[2].hasreloc
== 0)
13758 constraint ((inst
.operands
[0].hasreloc
!= 0), BAD_ARGS
);
13759 int val2
= inst
.operands
[2].imm
;
13760 int val0
= inst
.operands
[0].imm
& 0x1f;
13761 int diff
= val2
- val0
;
13763 inst
.instruction
|= 1 << 17; /* T bit. */
13764 else if (diff
!= 2)
13765 as_bad (_("out of range label-relative fixup value"));
13769 constraint ((inst
.operands
[0].hasreloc
== 0), BAD_ARGS
);
13770 inst
.relocs
[2].type
= BFD_RELOC_THUMB_PCREL_BFCSEL
;
13771 inst
.relocs
[2].pc_rel
= 1;
13775 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
13776 inst
.instruction
|= (inst
.operands
[3].imm
& 0xf) << 18;
13781 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13788 /* Helper function for do_t_loloop to handle relocations. */
13790 v8_1_loop_reloc (int is_le
)
13792 if (inst
.relocs
[0].exp
.X_op
== O_constant
)
13794 int value
= inst
.relocs
[0].exp
.X_add_number
;
13795 value
= (is_le
) ? -value
: value
;
13797 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
13798 as_bad (BAD_BRANCH_OFF
);
13802 immh
= (value
& 0x00000ffc) >> 2;
13803 imml
= (value
& 0x00000002) >> 1;
13805 inst
.instruction
|= (imml
<< 11) | (immh
<< 1);
13809 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_LOOP12
;
13810 inst
.relocs
[0].pc_rel
= 1;
13814 /* To handle the Scalar Low Overhead Loop instructions
13815 in Armv8.1-M Mainline. */
13819 unsigned long insn
= inst
.instruction
;
13821 set_pred_insn_type (OUTSIDE_PRED_INSN
);
13822 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13828 if (!inst
.operands
[0].present
)
13829 inst
.instruction
|= 1 << 21;
13831 v8_1_loop_reloc (TRUE
);
13835 v8_1_loop_reloc (FALSE
);
13836 /* Fall through. */
13838 constraint (inst
.operands
[1].isreg
!= 1, BAD_ARGS
);
13839 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
13846 /* MVE instruction encoder helpers. */
13847 #define M_MNEM_vabav 0xee800f01
13848 #define M_MNEM_vmladav 0xeef00e00
13849 #define M_MNEM_vmladava 0xeef00e20
13850 #define M_MNEM_vmladavx 0xeef01e00
13851 #define M_MNEM_vmladavax 0xeef01e20
13852 #define M_MNEM_vmlsdav 0xeef00e01
13853 #define M_MNEM_vmlsdava 0xeef00e21
13854 #define M_MNEM_vmlsdavx 0xeef01e01
13855 #define M_MNEM_vmlsdavax 0xeef01e21
13856 #define M_MNEM_vmullt 0xee011e00
13857 #define M_MNEM_vmullb 0xee010e00
13859 /* Neon instruction encoder helpers. */
13861 /* Encodings for the different types for various Neon opcodes. */
13863 /* An "invalid" code for the following tables. */
13866 struct neon_tab_entry
13869 unsigned float_or_poly
;
13870 unsigned scalar_or_imm
;
13873 /* Map overloaded Neon opcodes to their respective encodings. */
13874 #define NEON_ENC_TAB \
13875 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13876 X(vabdl, 0x0800700, N_INV, N_INV), \
13877 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13878 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13879 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13880 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13881 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13882 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13883 X(vaddl, 0x0800000, N_INV, N_INV), \
13884 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13885 X(vsubl, 0x0800200, N_INV, N_INV), \
13886 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13887 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13888 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13889 /* Register variants of the following two instructions are encoded as
13890 vcge / vcgt with the operands reversed. */ \
13891 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13892 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13893 X(vfma, N_INV, 0x0000c10, N_INV), \
13894 X(vfms, N_INV, 0x0200c10, N_INV), \
13895 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13896 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13897 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13898 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13899 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13900 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13901 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13902 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13903 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13904 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13905 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13906 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13907 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13908 X(vshl, 0x0000400, N_INV, 0x0800510), \
13909 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13910 X(vand, 0x0000110, N_INV, 0x0800030), \
13911 X(vbic, 0x0100110, N_INV, 0x0800030), \
13912 X(veor, 0x1000110, N_INV, N_INV), \
13913 X(vorn, 0x0300110, N_INV, 0x0800010), \
13914 X(vorr, 0x0200110, N_INV, 0x0800010), \
13915 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13916 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13917 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13918 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13919 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13920 X(vst1, 0x0000000, 0x0800000, N_INV), \
13921 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13922 X(vst2, 0x0000100, 0x0800100, N_INV), \
13923 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13924 X(vst3, 0x0000200, 0x0800200, N_INV), \
13925 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13926 X(vst4, 0x0000300, 0x0800300, N_INV), \
13927 X(vmovn, 0x1b20200, N_INV, N_INV), \
13928 X(vtrn, 0x1b20080, N_INV, N_INV), \
13929 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13930 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13931 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13932 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13933 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13934 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13935 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13936 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13937 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13938 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13939 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13940 X(vseleq, 0xe000a00, N_INV, N_INV), \
13941 X(vselvs, 0xe100a00, N_INV, N_INV), \
13942 X(vselge, 0xe200a00, N_INV, N_INV), \
13943 X(vselgt, 0xe300a00, N_INV, N_INV), \
13944 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13945 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13946 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13947 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13948 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13949 X(aes, 0x3b00300, N_INV, N_INV), \
13950 X(sha3op, 0x2000c00, N_INV, N_INV), \
13951 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13952 X(sha2op, 0x3ba0380, N_INV, N_INV)
13956 #define X(OPC,I,F,S) N_MNEM_##OPC
13961 static const struct neon_tab_entry neon_enc_tab
[] =
13963 #define X(OPC,I,F,S) { (I), (F), (S) }
13968 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13969 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13970 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13971 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13972 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13973 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13974 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13975 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13976 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13977 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13978 #define NEON_ENC_SINGLE_(X) \
13979 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13980 #define NEON_ENC_DOUBLE_(X) \
13981 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13982 #define NEON_ENC_FPV8_(X) \
13983 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13985 #define NEON_ENCODE(type, inst) \
13988 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13989 inst.is_neon = 1; \
13993 #define check_neon_suffixes \
13996 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13998 as_bad (_("invalid neon suffix for non neon instruction")); \
14004 /* Define shapes for instruction operands. The following mnemonic characters
14005 are used in this table:
14007 F - VFP S<n> register
14008 D - Neon D<n> register
14009 Q - Neon Q<n> register
14013 L - D<n> register list
14015 This table is used to generate various data:
14016 - enumerations of the form NS_DDR to be used as arguments to
14018 - a table classifying shapes into single, double, quad, mixed.
14019 - a table used to drive neon_select_shape. */
14021 #define NEON_SHAPE_DEF \
14022 X(3, (R, Q, Q), QUAD), \
14023 X(3, (D, D, D), DOUBLE), \
14024 X(3, (Q, Q, Q), QUAD), \
14025 X(3, (D, D, I), DOUBLE), \
14026 X(3, (Q, Q, I), QUAD), \
14027 X(3, (D, D, S), DOUBLE), \
14028 X(3, (Q, Q, S), QUAD), \
14029 X(3, (Q, Q, R), QUAD), \
14030 X(2, (D, D), DOUBLE), \
14031 X(2, (Q, Q), QUAD), \
14032 X(2, (D, S), DOUBLE), \
14033 X(2, (Q, S), QUAD), \
14034 X(2, (D, R), DOUBLE), \
14035 X(2, (Q, R), QUAD), \
14036 X(2, (D, I), DOUBLE), \
14037 X(2, (Q, I), QUAD), \
14038 X(3, (D, L, D), DOUBLE), \
14039 X(2, (D, Q), MIXED), \
14040 X(2, (Q, D), MIXED), \
14041 X(3, (D, Q, I), MIXED), \
14042 X(3, (Q, D, I), MIXED), \
14043 X(3, (Q, D, D), MIXED), \
14044 X(3, (D, Q, Q), MIXED), \
14045 X(3, (Q, Q, D), MIXED), \
14046 X(3, (Q, D, S), MIXED), \
14047 X(3, (D, Q, S), MIXED), \
14048 X(4, (D, D, D, I), DOUBLE), \
14049 X(4, (Q, Q, Q, I), QUAD), \
14050 X(4, (D, D, S, I), DOUBLE), \
14051 X(4, (Q, Q, S, I), QUAD), \
14052 X(2, (F, F), SINGLE), \
14053 X(3, (F, F, F), SINGLE), \
14054 X(2, (F, I), SINGLE), \
14055 X(2, (F, D), MIXED), \
14056 X(2, (D, F), MIXED), \
14057 X(3, (F, F, I), MIXED), \
14058 X(4, (R, R, F, F), SINGLE), \
14059 X(4, (F, F, R, R), SINGLE), \
14060 X(3, (D, R, R), DOUBLE), \
14061 X(3, (R, R, D), DOUBLE), \
14062 X(2, (S, R), SINGLE), \
14063 X(2, (R, S), SINGLE), \
14064 X(2, (F, R), SINGLE), \
14065 X(2, (R, F), SINGLE), \
14066 /* Half float shape supported so far. */\
14067 X (2, (H, D), MIXED), \
14068 X (2, (D, H), MIXED), \
14069 X (2, (H, F), MIXED), \
14070 X (2, (F, H), MIXED), \
14071 X (2, (H, H), HALF), \
14072 X (2, (H, R), HALF), \
14073 X (2, (R, H), HALF), \
14074 X (2, (H, I), HALF), \
14075 X (3, (H, H, H), HALF), \
14076 X (3, (H, F, I), MIXED), \
14077 X (3, (F, H, I), MIXED), \
14078 X (3, (D, H, H), MIXED), \
14079 X (3, (D, H, S), MIXED)
14081 #define S2(A,B) NS_##A##B
14082 #define S3(A,B,C) NS_##A##B##C
14083 #define S4(A,B,C,D) NS_##A##B##C##D
14085 #define X(N, L, C) S##N L
14098 enum neon_shape_class
14107 #define X(N, L, C) SC_##C
14109 static enum neon_shape_class neon_shape_class
[] =
14128 /* Register widths of above. */
14129 static unsigned neon_shape_el_size
[] =
14141 struct neon_shape_info
14144 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
14147 #define S2(A,B) { SE_##A, SE_##B }
14148 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
14149 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
14151 #define X(N, L, C) { N, S##N L }
14153 static struct neon_shape_info neon_shape_tab
[] =
14163 /* Bit masks used in type checking given instructions.
14164 'N_EQK' means the type must be the same as (or based on in some way) the key
14165 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
14166 set, various other bits can be set as well in order to modify the meaning of
14167 the type constraint. */
14169 enum neon_type_mask
14193 N_KEY
= 0x1000000, /* Key element (main type specifier). */
14194 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
14195 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
14196 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
14197 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
14198 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
14199 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
14200 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
14201 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
14202 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
14203 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
14205 N_MAX_NONSPECIAL
= N_P64
14208 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
14210 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
14211 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14212 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
14213 #define N_S_32 (N_S8 | N_S16 | N_S32)
14214 #define N_F_16_32 (N_F16 | N_F32)
14215 #define N_SUF_32 (N_SU_32 | N_F_16_32)
14216 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
14217 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
14218 #define N_F_ALL (N_F16 | N_F32 | N_F64)
14219 #define N_I_MVE (N_I8 | N_I16 | N_I32)
14220 #define N_F_MVE (N_F16 | N_F32)
14221 #define N_SU_MVE (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14223 /* Pass this as the first type argument to neon_check_type to ignore types
14225 #define N_IGNORE_TYPE (N_KEY | N_EQK)
14227 /* Select a "shape" for the current instruction (describing register types or
14228 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14229 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14230 function of operand parsing, so this function doesn't need to be called.
14231 Shapes should be listed in order of decreasing length. */
14233 static enum neon_shape
14234 neon_select_shape (enum neon_shape shape
, ...)
14237 enum neon_shape first_shape
= shape
;
14239 /* Fix missing optional operands. FIXME: we don't know at this point how
14240 many arguments we should have, so this makes the assumption that we have
14241 > 1. This is true of all current Neon opcodes, I think, but may not be
14242 true in the future. */
14243 if (!inst
.operands
[1].present
)
14244 inst
.operands
[1] = inst
.operands
[0];
14246 va_start (ap
, shape
);
14248 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
14253 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
14255 if (!inst
.operands
[j
].present
)
14261 switch (neon_shape_tab
[shape
].el
[j
])
14263 /* If a .f16, .16, .u16, .s16 type specifier is given over
14264 a VFP single precision register operand, it's essentially
14265 means only half of the register is used.
14267 If the type specifier is given after the mnemonics, the
14268 information is stored in inst.vectype. If the type specifier
14269 is given after register operand, the information is stored
14270 in inst.operands[].vectype.
14272 When there is only one type specifier, and all the register
14273 operands are the same type of hardware register, the type
14274 specifier applies to all register operands.
14276 If no type specifier is given, the shape is inferred from
14277 operand information.
14280 vadd.f16 s0, s1, s2: NS_HHH
14281 vabs.f16 s0, s1: NS_HH
14282 vmov.f16 s0, r1: NS_HR
14283 vmov.f16 r0, s1: NS_RH
14284 vcvt.f16 r0, s1: NS_RH
14285 vcvt.f16.s32 s2, s2, #29: NS_HFI
14286 vcvt.f16.s32 s2, s2: NS_HF
14289 if (!(inst
.operands
[j
].isreg
14290 && inst
.operands
[j
].isvec
14291 && inst
.operands
[j
].issingle
14292 && !inst
.operands
[j
].isquad
14293 && ((inst
.vectype
.elems
== 1
14294 && inst
.vectype
.el
[0].size
== 16)
14295 || (inst
.vectype
.elems
> 1
14296 && inst
.vectype
.el
[j
].size
== 16)
14297 || (inst
.vectype
.elems
== 0
14298 && inst
.operands
[j
].vectype
.type
!= NT_invtype
14299 && inst
.operands
[j
].vectype
.size
== 16))))
14304 if (!(inst
.operands
[j
].isreg
14305 && inst
.operands
[j
].isvec
14306 && inst
.operands
[j
].issingle
14307 && !inst
.operands
[j
].isquad
14308 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
14309 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
14310 || (inst
.vectype
.elems
== 0
14311 && (inst
.operands
[j
].vectype
.size
== 32
14312 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
14317 if (!(inst
.operands
[j
].isreg
14318 && inst
.operands
[j
].isvec
14319 && !inst
.operands
[j
].isquad
14320 && !inst
.operands
[j
].issingle
))
14325 if (!(inst
.operands
[j
].isreg
14326 && !inst
.operands
[j
].isvec
))
14331 if (!(inst
.operands
[j
].isreg
14332 && inst
.operands
[j
].isvec
14333 && inst
.operands
[j
].isquad
14334 && !inst
.operands
[j
].issingle
))
14339 if (!(!inst
.operands
[j
].isreg
14340 && !inst
.operands
[j
].isscalar
))
14345 if (!(!inst
.operands
[j
].isreg
14346 && inst
.operands
[j
].isscalar
))
14356 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
14357 /* We've matched all the entries in the shape table, and we don't
14358 have any left over operands which have not been matched. */
14364 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
14365 first_error (_("invalid instruction shape"));
14370 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14371 means the Q bit should be set). */
14374 neon_quad (enum neon_shape shape
)
14376 return neon_shape_class
[shape
] == SC_QUAD
;
14380 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
14383 /* Allow modification to be made to types which are constrained to be
14384 based on the key element, based on bits set alongside N_EQK. */
14385 if ((typebits
& N_EQK
) != 0)
14387 if ((typebits
& N_HLF
) != 0)
14389 else if ((typebits
& N_DBL
) != 0)
14391 if ((typebits
& N_SGN
) != 0)
14392 *g_type
= NT_signed
;
14393 else if ((typebits
& N_UNS
) != 0)
14394 *g_type
= NT_unsigned
;
14395 else if ((typebits
& N_INT
) != 0)
14396 *g_type
= NT_integer
;
14397 else if ((typebits
& N_FLT
) != 0)
14398 *g_type
= NT_float
;
14399 else if ((typebits
& N_SIZ
) != 0)
14400 *g_type
= NT_untyped
;
14404 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14405 operand type, i.e. the single type specified in a Neon instruction when it
14406 is the only one given. */
14408 static struct neon_type_el
14409 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
14411 struct neon_type_el dest
= *key
;
14413 gas_assert ((thisarg
& N_EQK
) != 0);
14415 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
14420 /* Convert Neon type and size into compact bitmask representation. */
14422 static enum neon_type_mask
14423 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
14430 case 8: return N_8
;
14431 case 16: return N_16
;
14432 case 32: return N_32
;
14433 case 64: return N_64
;
14441 case 8: return N_I8
;
14442 case 16: return N_I16
;
14443 case 32: return N_I32
;
14444 case 64: return N_I64
;
14452 case 16: return N_F16
;
14453 case 32: return N_F32
;
14454 case 64: return N_F64
;
14462 case 8: return N_P8
;
14463 case 16: return N_P16
;
14464 case 64: return N_P64
;
14472 case 8: return N_S8
;
14473 case 16: return N_S16
;
14474 case 32: return N_S32
;
14475 case 64: return N_S64
;
14483 case 8: return N_U8
;
14484 case 16: return N_U16
;
14485 case 32: return N_U32
;
14486 case 64: return N_U64
;
14497 /* Convert compact Neon bitmask type representation to a type and size. Only
14498 handles the case where a single bit is set in the mask. */
14501 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
14502 enum neon_type_mask mask
)
14504 if ((mask
& N_EQK
) != 0)
14507 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
14509 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
14511 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
14513 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
14518 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
14520 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
14521 *type
= NT_unsigned
;
14522 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
14523 *type
= NT_integer
;
14524 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
14525 *type
= NT_untyped
;
14526 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
14528 else if ((mask
& (N_F_ALL
)) != 0)
14536 /* Modify a bitmask of allowed types. This is only needed for type
14540 modify_types_allowed (unsigned allowed
, unsigned mods
)
14543 enum neon_el_type type
;
14549 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
14551 if (el_type_of_type_chk (&type
, &size
,
14552 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
14554 neon_modify_type_size (mods
, &type
, &size
);
14555 destmask
|= type_chk_of_el_type (type
, size
);
14562 /* Check type and return type classification.
14563 The manual states (paraphrase): If one datatype is given, it indicates the
14565 - the second operand, if there is one
14566 - the operand, if there is no second operand
14567 - the result, if there are no operands.
14568 This isn't quite good enough though, so we use a concept of a "key" datatype
14569 which is set on a per-instruction basis, which is the one which matters when
14570 only one data type is written.
14571 Note: this function has side-effects (e.g. filling in missing operands). All
14572 Neon instructions should call it before performing bit encoding. */
14574 static struct neon_type_el
14575 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
14578 unsigned i
, pass
, key_el
= 0;
14579 unsigned types
[NEON_MAX_TYPE_ELS
];
14580 enum neon_el_type k_type
= NT_invtype
;
14581 unsigned k_size
= -1u;
14582 struct neon_type_el badtype
= {NT_invtype
, -1};
14583 unsigned key_allowed
= 0;
14585 /* Optional registers in Neon instructions are always (not) in operand 1.
14586 Fill in the missing operand here, if it was omitted. */
14587 if (els
> 1 && !inst
.operands
[1].present
)
14588 inst
.operands
[1] = inst
.operands
[0];
14590 /* Suck up all the varargs. */
14592 for (i
= 0; i
< els
; i
++)
14594 unsigned thisarg
= va_arg (ap
, unsigned);
14595 if (thisarg
== N_IGNORE_TYPE
)
14600 types
[i
] = thisarg
;
14601 if ((thisarg
& N_KEY
) != 0)
14606 if (inst
.vectype
.elems
> 0)
14607 for (i
= 0; i
< els
; i
++)
14608 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
14610 first_error (_("types specified in both the mnemonic and operands"));
14614 /* Duplicate inst.vectype elements here as necessary.
14615 FIXME: No idea if this is exactly the same as the ARM assembler,
14616 particularly when an insn takes one register and one non-register
14618 if (inst
.vectype
.elems
== 1 && els
> 1)
14621 inst
.vectype
.elems
= els
;
14622 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
14623 for (j
= 0; j
< els
; j
++)
14625 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14628 else if (inst
.vectype
.elems
== 0 && els
> 0)
14631 /* No types were given after the mnemonic, so look for types specified
14632 after each operand. We allow some flexibility here; as long as the
14633 "key" operand has a type, we can infer the others. */
14634 for (j
= 0; j
< els
; j
++)
14635 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
14636 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
14638 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
14640 for (j
= 0; j
< els
; j
++)
14641 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
14642 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14647 first_error (_("operand types can't be inferred"));
14651 else if (inst
.vectype
.elems
!= els
)
14653 first_error (_("type specifier has the wrong number of parts"));
14657 for (pass
= 0; pass
< 2; pass
++)
14659 for (i
= 0; i
< els
; i
++)
14661 unsigned thisarg
= types
[i
];
14662 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
14663 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
14664 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
14665 unsigned g_size
= inst
.vectype
.el
[i
].size
;
14667 /* Decay more-specific signed & unsigned types to sign-insensitive
14668 integer types if sign-specific variants are unavailable. */
14669 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
14670 && (types_allowed
& N_SU_ALL
) == 0)
14671 g_type
= NT_integer
;
14673 /* If only untyped args are allowed, decay any more specific types to
14674 them. Some instructions only care about signs for some element
14675 sizes, so handle that properly. */
14676 if (((types_allowed
& N_UNT
) == 0)
14677 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
14678 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
14679 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
14680 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
14681 g_type
= NT_untyped
;
14685 if ((thisarg
& N_KEY
) != 0)
14689 key_allowed
= thisarg
& ~N_KEY
;
14691 /* Check architecture constraint on FP16 extension. */
14693 && k_type
== NT_float
14694 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14696 inst
.error
= _(BAD_FP16
);
14703 if ((thisarg
& N_VFP
) != 0)
14705 enum neon_shape_el regshape
;
14706 unsigned regwidth
, match
;
14708 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14711 first_error (_("invalid instruction shape"));
14714 regshape
= neon_shape_tab
[ns
].el
[i
];
14715 regwidth
= neon_shape_el_size
[regshape
];
14717 /* In VFP mode, operands must match register widths. If we
14718 have a key operand, use its width, else use the width of
14719 the current operand. */
14725 /* FP16 will use a single precision register. */
14726 if (regwidth
== 32 && match
== 16)
14728 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14732 inst
.error
= _(BAD_FP16
);
14737 if (regwidth
!= match
)
14739 first_error (_("operand size must match register width"));
14744 if ((thisarg
& N_EQK
) == 0)
14746 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14748 if ((given_type
& types_allowed
) == 0)
14750 first_error (BAD_SIMD_TYPE
);
14756 enum neon_el_type mod_k_type
= k_type
;
14757 unsigned mod_k_size
= k_size
;
14758 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14759 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14761 first_error (_("inconsistent types in Neon instruction"));
14769 return inst
.vectype
.el
[key_el
];
14772 /* Neon-style VFP instruction forwarding. */
14774 /* Thumb VFP instructions have 0xE in the condition field. */
14777 do_vfp_cond_or_thumb (void)
14782 inst
.instruction
|= 0xe0000000;
14784 inst
.instruction
|= inst
.cond
<< 28;
14787 /* Look up and encode a simple mnemonic, for use as a helper function for the
14788 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14789 etc. It is assumed that operand parsing has already been done, and that the
14790 operands are in the form expected by the given opcode (this isn't necessarily
14791 the same as the form in which they were parsed, hence some massaging must
14792 take place before this function is called).
14793 Checks current arch version against that in the looked-up opcode. */
14796 do_vfp_nsyn_opcode (const char *opname
)
14798 const struct asm_opcode
*opcode
;
14800 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14805 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14806 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14813 inst
.instruction
= opcode
->tvalue
;
14814 opcode
->tencode ();
14818 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14819 opcode
->aencode ();
14824 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14826 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14828 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14831 do_vfp_nsyn_opcode ("fadds");
14833 do_vfp_nsyn_opcode ("fsubs");
14835 /* ARMv8.2 fp16 instruction. */
14837 do_scalar_fp16_v82_encode ();
14842 do_vfp_nsyn_opcode ("faddd");
14844 do_vfp_nsyn_opcode ("fsubd");
14848 /* Check operand types to see if this is a VFP instruction, and if so call
14852 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14854 enum neon_shape rs
;
14855 struct neon_type_el et
;
14860 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14861 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14865 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14866 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14867 N_F_ALL
| N_KEY
| N_VFP
);
14874 if (et
.type
!= NT_invtype
)
14885 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14887 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14889 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14892 do_vfp_nsyn_opcode ("fmacs");
14894 do_vfp_nsyn_opcode ("fnmacs");
14896 /* ARMv8.2 fp16 instruction. */
14898 do_scalar_fp16_v82_encode ();
14903 do_vfp_nsyn_opcode ("fmacd");
14905 do_vfp_nsyn_opcode ("fnmacd");
14910 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14912 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14914 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14917 do_vfp_nsyn_opcode ("ffmas");
14919 do_vfp_nsyn_opcode ("ffnmas");
14921 /* ARMv8.2 fp16 instruction. */
14923 do_scalar_fp16_v82_encode ();
14928 do_vfp_nsyn_opcode ("ffmad");
14930 do_vfp_nsyn_opcode ("ffnmad");
14935 do_vfp_nsyn_mul (enum neon_shape rs
)
14937 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14939 do_vfp_nsyn_opcode ("fmuls");
14941 /* ARMv8.2 fp16 instruction. */
14943 do_scalar_fp16_v82_encode ();
14946 do_vfp_nsyn_opcode ("fmuld");
14950 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14952 int is_neg
= (inst
.instruction
& 0x80) != 0;
14953 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14955 if (rs
== NS_FF
|| rs
== NS_HH
)
14958 do_vfp_nsyn_opcode ("fnegs");
14960 do_vfp_nsyn_opcode ("fabss");
14962 /* ARMv8.2 fp16 instruction. */
14964 do_scalar_fp16_v82_encode ();
14969 do_vfp_nsyn_opcode ("fnegd");
14971 do_vfp_nsyn_opcode ("fabsd");
14975 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14976 insns belong to Neon, and are handled elsewhere. */
14979 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14981 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14985 do_vfp_nsyn_opcode ("fldmdbs");
14987 do_vfp_nsyn_opcode ("fldmias");
14992 do_vfp_nsyn_opcode ("fstmdbs");
14994 do_vfp_nsyn_opcode ("fstmias");
14999 do_vfp_nsyn_sqrt (void)
15001 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
15002 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
15004 if (rs
== NS_FF
|| rs
== NS_HH
)
15006 do_vfp_nsyn_opcode ("fsqrts");
15008 /* ARMv8.2 fp16 instruction. */
15010 do_scalar_fp16_v82_encode ();
15013 do_vfp_nsyn_opcode ("fsqrtd");
15017 do_vfp_nsyn_div (void)
15019 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15020 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15021 N_F_ALL
| N_KEY
| N_VFP
);
15023 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15025 do_vfp_nsyn_opcode ("fdivs");
15027 /* ARMv8.2 fp16 instruction. */
15029 do_scalar_fp16_v82_encode ();
15032 do_vfp_nsyn_opcode ("fdivd");
15036 do_vfp_nsyn_nmul (void)
15038 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15039 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15040 N_F_ALL
| N_KEY
| N_VFP
);
15042 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15044 NEON_ENCODE (SINGLE
, inst
);
15045 do_vfp_sp_dyadic ();
15047 /* ARMv8.2 fp16 instruction. */
15049 do_scalar_fp16_v82_encode ();
15053 NEON_ENCODE (DOUBLE
, inst
);
15054 do_vfp_dp_rd_rn_rm ();
15056 do_vfp_cond_or_thumb ();
15061 do_vfp_nsyn_cmp (void)
15063 enum neon_shape rs
;
15064 if (inst
.operands
[1].isreg
)
15066 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
15067 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
15069 if (rs
== NS_FF
|| rs
== NS_HH
)
15071 NEON_ENCODE (SINGLE
, inst
);
15072 do_vfp_sp_monadic ();
15076 NEON_ENCODE (DOUBLE
, inst
);
15077 do_vfp_dp_rd_rm ();
15082 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
15083 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
15085 switch (inst
.instruction
& 0x0fffffff)
15088 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
15091 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
15097 if (rs
== NS_FI
|| rs
== NS_HI
)
15099 NEON_ENCODE (SINGLE
, inst
);
15100 do_vfp_sp_compare_z ();
15104 NEON_ENCODE (DOUBLE
, inst
);
15108 do_vfp_cond_or_thumb ();
15110 /* ARMv8.2 fp16 instruction. */
15111 if (rs
== NS_HI
|| rs
== NS_HH
)
15112 do_scalar_fp16_v82_encode ();
15116 nsyn_insert_sp (void)
15118 inst
.operands
[1] = inst
.operands
[0];
15119 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
15120 inst
.operands
[0].reg
= REG_SP
;
15121 inst
.operands
[0].isreg
= 1;
15122 inst
.operands
[0].writeback
= 1;
15123 inst
.operands
[0].present
= 1;
15127 do_vfp_nsyn_push (void)
15131 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
15132 _("register list must contain at least 1 and at most 16 "
15135 if (inst
.operands
[1].issingle
)
15136 do_vfp_nsyn_opcode ("fstmdbs");
15138 do_vfp_nsyn_opcode ("fstmdbd");
15142 do_vfp_nsyn_pop (void)
15146 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
15147 _("register list must contain at least 1 and at most 16 "
15150 if (inst
.operands
[1].issingle
)
15151 do_vfp_nsyn_opcode ("fldmias");
15153 do_vfp_nsyn_opcode ("fldmiad");
15156 /* Fix up Neon data-processing instructions, ORing in the correct bits for
15157 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
15160 neon_dp_fixup (struct arm_it
* insn
)
15162 unsigned int i
= insn
->instruction
;
15167 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
15178 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
/* Split a 5-bit Neon register number into its encoding fields: the low four
   bits and the high (fifth) bit.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
15194 mve_encode_qqr (int size
, int fp
)
15196 if (inst
.operands
[2].reg
== REG_SP
)
15197 as_tsktsk (MVE_BAD_SP
);
15198 else if (inst
.operands
[2].reg
== REG_PC
)
15199 as_tsktsk (MVE_BAD_PC
);
15204 if (((unsigned)inst
.instruction
) == 0xd00)
15205 inst
.instruction
= 0xee300f40;
15207 else if (((unsigned)inst
.instruction
) == 0x200d00)
15208 inst
.instruction
= 0xee301f40;
15210 /* Setting size which is 1 for F16 and 0 for F32. */
15211 inst
.instruction
|= (size
== 16) << 28;
15216 if (((unsigned)inst
.instruction
) == 0x800)
15217 inst
.instruction
= 0xee010f40;
15219 else if (((unsigned)inst
.instruction
) == 0x1000800)
15220 inst
.instruction
= 0xee011f40;
15221 /* Setting bits for size. */
15222 inst
.instruction
|= neon_logbits (size
) << 20;
15224 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15225 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15226 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15227 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15228 inst
.instruction
|= inst
.operands
[2].reg
;
15233 mve_encode_rqq (unsigned bit28
, unsigned size
)
15235 inst
.instruction
|= bit28
<< 28;
15236 inst
.instruction
|= neon_logbits (size
) << 20;
15237 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15238 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
15239 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15240 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15241 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15246 mve_encode_qqq (int ubit
, int size
)
15249 inst
.instruction
|= (ubit
!= 0) << 28;
15250 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15251 inst
.instruction
|= neon_logbits (size
) << 20;
15252 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15253 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15254 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15255 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15256 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15262 /* Encode insns with bit pattern:
15264 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15265 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15267 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15268 different meaning for some instruction. */
15271 neon_three_same (int isquad
, int ubit
, int size
)
15273 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15274 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15275 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15276 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15277 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15278 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15279 inst
.instruction
|= (isquad
!= 0) << 6;
15280 inst
.instruction
|= (ubit
!= 0) << 24;
15282 inst
.instruction
|= neon_logbits (size
) << 20;
15284 neon_dp_fixup (&inst
);
15287 /* Encode instructions of the form:
15289 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15290 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15292 Don't write size if SIZE == -1. */
15295 neon_two_same (int qbit
, int ubit
, int size
)
15297 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15298 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15299 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15300 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15301 inst
.instruction
|= (qbit
!= 0) << 6;
15302 inst
.instruction
|= (ubit
!= 0) << 24;
15305 inst
.instruction
|= neon_logbits (size
) << 18;
15307 neon_dp_fixup (&inst
);
15310 /* Neon instruction encoders, in approximate order of appearance. */
15313 do_neon_dyadic_i_su (void)
15315 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15316 struct neon_type_el et
= neon_check_type (3, rs
,
15317 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
15318 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15322 do_neon_dyadic_i64_su (void)
15324 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15325 struct neon_type_el et
= neon_check_type (3, rs
,
15326 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15327 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15331 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
15334 unsigned size
= et
.size
>> 3;
15335 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15336 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15337 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15338 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15339 inst
.instruction
|= (isquad
!= 0) << 6;
15340 inst
.instruction
|= immbits
<< 16;
15341 inst
.instruction
|= (size
>> 3) << 7;
15342 inst
.instruction
|= (size
& 0x7) << 19;
15344 inst
.instruction
|= (uval
!= 0) << 24;
15346 neon_dp_fixup (&inst
);
15350 do_neon_shl_imm (void)
15352 if (!inst
.operands
[2].isreg
)
15354 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15355 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
15356 int imm
= inst
.operands
[2].imm
;
15358 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15359 _("immediate out of range for shift"));
15360 NEON_ENCODE (IMMED
, inst
);
15361 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15365 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15366 struct neon_type_el et
= neon_check_type (3, rs
,
15367 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15370 /* VSHL/VQSHL 3-register variants have syntax such as:
15372 whereas other 3-register operations encoded by neon_three_same have
15375 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
15377 tmp
= inst
.operands
[2].reg
;
15378 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15379 inst
.operands
[1].reg
= tmp
;
15380 NEON_ENCODE (INTEGER
, inst
);
15381 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15386 do_neon_qshl_imm (void)
15388 if (!inst
.operands
[2].isreg
)
15390 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15391 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
15392 int imm
= inst
.operands
[2].imm
;
15394 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15395 _("immediate out of range for shift"));
15396 NEON_ENCODE (IMMED
, inst
);
15397 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
15401 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15402 struct neon_type_el et
= neon_check_type (3, rs
,
15403 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15406 /* See note in do_neon_shl_imm. */
15407 tmp
= inst
.operands
[2].reg
;
15408 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15409 inst
.operands
[1].reg
= tmp
;
15410 NEON_ENCODE (INTEGER
, inst
);
15411 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15416 do_neon_rshl (void)
15418 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15419 struct neon_type_el et
= neon_check_type (3, rs
,
15420 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15423 tmp
= inst
.operands
[2].reg
;
15424 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15425 inst
.operands
[1].reg
= tmp
;
15426 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15430 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
15432 /* Handle .I8 pseudo-instructions. */
15435 /* Unfortunately, this will make everything apart from zero out-of-range.
15436 FIXME is this the intended semantics? There doesn't seem much point in
15437 accepting .I8 if so. */
15438 immediate
|= immediate
<< 8;
15444 if (immediate
== (immediate
& 0x000000ff))
15446 *immbits
= immediate
;
15449 else if (immediate
== (immediate
& 0x0000ff00))
15451 *immbits
= immediate
>> 8;
15454 else if (immediate
== (immediate
& 0x00ff0000))
15456 *immbits
= immediate
>> 16;
15459 else if (immediate
== (immediate
& 0xff000000))
15461 *immbits
= immediate
>> 24;
15464 if ((immediate
& 0xffff) != (immediate
>> 16))
15465 goto bad_immediate
;
15466 immediate
&= 0xffff;
15469 if (immediate
== (immediate
& 0x000000ff))
15471 *immbits
= immediate
;
15474 else if (immediate
== (immediate
& 0x0000ff00))
15476 *immbits
= immediate
>> 8;
15481 first_error (_("immediate value out of range"));
15486 do_neon_logic (void)
15488 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
15490 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15491 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15492 /* U bit and size field were set as part of the bitmask. */
15493 NEON_ENCODE (INTEGER
, inst
);
15494 neon_three_same (neon_quad (rs
), 0, -1);
15498 const int three_ops_form
= (inst
.operands
[2].present
15499 && !inst
.operands
[2].isreg
);
15500 const int immoperand
= (three_ops_form
? 2 : 1);
15501 enum neon_shape rs
= (three_ops_form
15502 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
15503 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
15504 struct neon_type_el et
= neon_check_type (2, rs
,
15505 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15506 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
15510 if (et
.type
== NT_invtype
)
15513 if (three_ops_form
)
15514 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15515 _("first and second operands shall be the same register"));
15517 NEON_ENCODE (IMMED
, inst
);
15519 immbits
= inst
.operands
[immoperand
].imm
;
15522 /* .i64 is a pseudo-op, so the immediate must be a repeating
15524 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
15525 inst
.operands
[immoperand
].reg
: 0))
15527 /* Set immbits to an invalid constant. */
15528 immbits
= 0xdeadbeef;
15535 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15539 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15543 /* Pseudo-instruction for VBIC. */
15544 neon_invert_size (&immbits
, 0, et
.size
);
15545 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15549 /* Pseudo-instruction for VORR. */
15550 neon_invert_size (&immbits
, 0, et
.size
);
15551 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15561 inst
.instruction
|= neon_quad (rs
) << 6;
15562 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15563 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15564 inst
.instruction
|= cmode
<< 8;
15565 neon_write_immbits (immbits
);
15567 neon_dp_fixup (&inst
);
15572 do_neon_bitfield (void)
15574 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15575 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15576 neon_three_same (neon_quad (rs
), 0, -1);
15580 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
15583 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
15584 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
15586 if (et
.type
== NT_float
)
15588 NEON_ENCODE (FLOAT
, inst
);
15590 mve_encode_qqr (et
.size
, 1);
15592 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15596 NEON_ENCODE (INTEGER
, inst
);
15598 mve_encode_qqr (et
.size
, 0);
15600 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
15606 do_neon_dyadic_if_su_d (void)
15608 /* This version only allow D registers, but that constraint is enforced during
15609 operand parsing so we don't need to do anything extra here. */
15610 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15614 do_neon_dyadic_if_i_d (void)
15616 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15617 affected if we specify unsigned args. */
15618 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Flags for vfp_or_neon_is_neon.  NOTE(review): NEON_CHECK_CC = 1 is
   reconstructed from upstream; only the ARCH values are visible here.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
15628 /* Call this function if an instruction which may have belonged to the VFP or
15629 Neon instruction sets, but turned out to be a Neon instruction (due to the
15630 operand types involved, etc.). We have to check and/or fix-up a couple of
15633 - Make sure the user hasn't attempted to make a Neon instruction
15635 - Alter the value in the condition code field if necessary.
15636 - Make sure that the arch supports Neon instructions.
15638 Which of these operations take place depends on bits from enum
15639 vfp_or_neon_is_neon_bits.
15641 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15642 current instruction's condition is COND_ALWAYS, the condition field is
15643 changed to inst.uncond_value. This is necessary because instructions shared
15644 between VFP and Neon may be conditional for the VFP variants only, and the
15645 unconditional Neon version must have, e.g., 0xF in the condition field. */
15648 vfp_or_neon_is_neon (unsigned check
)
15650 /* Conditions are always legal in Thumb mode (IT blocks). */
15651 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
15653 if (inst
.cond
!= COND_ALWAYS
)
15655 first_error (_(BAD_COND
));
15658 if (inst
.uncond_value
!= -1)
15659 inst
.instruction
|= inst
.uncond_value
<< 28;
15663 if (((check
& NEON_CHECK_ARCH
) && !mark_feature_used (&fpu_neon_ext_v1
))
15664 || ((check
& NEON_CHECK_ARCH8
)
15665 && !mark_feature_used (&fpu_neon_ext_armv8
)))
15667 first_error (_(BAD_FPU
));
15675 check_simd_pred_availability (int fp
, unsigned check
)
15677 if (inst
.cond
> COND_ALWAYS
)
15679 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15681 inst
.error
= BAD_FPU
;
15684 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
15686 else if (inst
.cond
< COND_ALWAYS
)
15688 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15689 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15690 else if (vfp_or_neon_is_neon (check
) == FAIL
)
15695 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fp
? mve_fp_ext
: mve_ext
)
15696 && vfp_or_neon_is_neon (check
) == FAIL
)
15699 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15700 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15706 do_neon_dyadic_if_su (void)
15708 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
15709 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15712 if (check_simd_pred_availability (et
.type
== NT_float
,
15713 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
15716 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15720 do_neon_addsub_if_i (void)
15722 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)
15723 && try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
15726 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
15727 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
,
15728 N_EQK
, N_IF_32
| N_I64
| N_KEY
);
15730 constraint (rs
== NS_QQR
&& et
.size
== 64, BAD_FPU
);
15731 /* If we are parsing Q registers and the element types match MVE, which NEON
15732 also supports, then we must check whether this is an instruction that can
15733 be used by both MVE/NEON. This distinction can be made based on whether
15734 they are predicated or not. */
15735 if ((rs
== NS_QQQ
|| rs
== NS_QQR
) && et
.size
!= 64)
15737 if (check_simd_pred_availability (et
.type
== NT_float
,
15738 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
15743 /* If they are either in a D register or are using an unsupported. */
15745 && vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15749 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15750 affected if we specify unsigned args. */
15751 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
15754 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15756 V<op> A,B (A is operand 0, B is operand 2)
15761 so handle that case specially. */
15764 neon_exchange_operands (void)
15766 if (inst
.operands
[1].present
)
15768 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
15770 /* Swap operands[1] and operands[2]. */
15771 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
15772 inst
.operands
[1] = inst
.operands
[2];
15773 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
15778 inst
.operands
[1] = inst
.operands
[2];
15779 inst
.operands
[2] = inst
.operands
[0];
15784 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
15786 if (inst
.operands
[2].isreg
)
15789 neon_exchange_operands ();
15790 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
15794 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15795 struct neon_type_el et
= neon_check_type (2, rs
,
15796 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
15798 NEON_ENCODE (IMMED
, inst
);
15799 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15800 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15801 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15802 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15803 inst
.instruction
|= neon_quad (rs
) << 6;
15804 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15805 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15807 neon_dp_fixup (&inst
);
15814 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
15818 do_neon_cmp_inv (void)
15820 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
15826 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except elsize
   should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of indexed
   scalar register, to raw encoding.  There is also register and index range
   check based on ELSIZE.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
15868 /* Encode multiply / multiply-accumulate scalar instructions. */
15871 neon_mul_mac (struct neon_type_el et
, int ubit
)
15875 /* Give a more helpful error message if we have an invalid type. */
15876 if (et
.type
== NT_invtype
)
15879 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
15880 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15881 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15882 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15883 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15884 inst
.instruction
|= LOW4 (scalar
);
15885 inst
.instruction
|= HI1 (scalar
) << 5;
15886 inst
.instruction
|= (et
.type
== NT_float
) << 8;
15887 inst
.instruction
|= neon_logbits (et
.size
) << 20;
15888 inst
.instruction
|= (ubit
!= 0) << 24;
15890 neon_dp_fixup (&inst
);
15894 do_neon_mac_maybe_scalar (void)
15896 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
15899 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15902 if (inst
.operands
[2].isscalar
)
15904 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15905 struct neon_type_el et
= neon_check_type (3, rs
,
15906 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
15907 NEON_ENCODE (SCALAR
, inst
);
15908 neon_mul_mac (et
, neon_quad (rs
));
15912 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15913 affected if we specify unsigned args. */
15914 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15919 do_neon_fmac (void)
15921 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
15924 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15927 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15933 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15934 struct neon_type_el et
= neon_check_type (3, rs
,
15935 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15936 neon_three_same (neon_quad (rs
), 0, et
.size
);
15939 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15940 same types as the MAC equivalents. The polynomial type for this instruction
15941 is encoded the same as the integer type. */
15946 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
15949 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15952 if (inst
.operands
[2].isscalar
)
15953 do_neon_mac_maybe_scalar ();
15955 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
15959 do_neon_qdmulh (void)
15961 if (inst
.operands
[2].isscalar
)
15963 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15964 struct neon_type_el et
= neon_check_type (3, rs
,
15965 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15966 NEON_ENCODE (SCALAR
, inst
);
15967 neon_mul_mac (et
, neon_quad (rs
));
15971 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15972 struct neon_type_el et
= neon_check_type (3, rs
,
15973 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15974 NEON_ENCODE (INTEGER
, inst
);
15975 /* The U bit (rounding) comes from bit mask. */
15976 neon_three_same (neon_quad (rs
), 0, et
.size
);
15981 do_mve_vmull (void)
15984 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_DDS
,
15985 NS_QQS
, NS_QQQ
, NS_QQR
, NS_NULL
);
15986 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
15987 && inst
.cond
== COND_ALWAYS
15988 && ((unsigned)inst
.instruction
) == M_MNEM_vmullt
)
15993 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15994 N_SUF_32
| N_F64
| N_P8
15995 | N_P16
| N_I_MVE
| N_KEY
);
15996 if (((et
.type
== NT_poly
) && et
.size
== 8
15997 && ARM_CPU_IS_ANY (cpu_variant
))
15998 || (et
.type
== NT_integer
) || (et
.type
== NT_float
))
16005 constraint (rs
!= NS_QQQ
, BAD_FPU
);
16006 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16007 N_SU_32
| N_P8
| N_P16
| N_KEY
);
16009 /* We are dealing with MVE's vmullt. */
16011 && (inst
.operands
[0].reg
== inst
.operands
[1].reg
16012 || inst
.operands
[0].reg
== inst
.operands
[2].reg
))
16013 as_tsktsk (BAD_MVE_SRCDEST
);
16015 if (inst
.cond
> COND_ALWAYS
)
16016 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16018 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16020 if (et
.type
== NT_poly
)
16021 mve_encode_qqq (neon_logbits (et
.size
), 64);
16023 mve_encode_qqq (et
.type
== NT_unsigned
, et
.size
);
16028 inst
.instruction
= N_MNEM_vmul
;
16031 inst
.pred_insn_type
= INSIDE_IT_INSN
;
16036 do_mve_vabav (void)
16038 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
16043 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
16046 struct neon_type_el et
= neon_check_type (2, NS_NULL
, N_EQK
, N_KEY
| N_S8
16047 | N_S16
| N_S32
| N_U8
| N_U16
16050 if (inst
.cond
> COND_ALWAYS
)
16051 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16053 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16055 mve_encode_rqq (et
.type
== NT_unsigned
, et
.size
);
16059 do_mve_vmladav (void)
16061 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
16062 struct neon_type_el et
= neon_check_type (3, rs
,
16063 N_EQK
, N_EQK
, N_SU_MVE
| N_KEY
);
16065 if (et
.type
== NT_unsigned
16066 && (inst
.instruction
== M_MNEM_vmladavx
16067 || inst
.instruction
== M_MNEM_vmladavax
16068 || inst
.instruction
== M_MNEM_vmlsdav
16069 || inst
.instruction
== M_MNEM_vmlsdava
16070 || inst
.instruction
== M_MNEM_vmlsdavx
16071 || inst
.instruction
== M_MNEM_vmlsdavax
))
16072 first_error (BAD_SIMD_TYPE
);
16074 constraint (inst
.operands
[2].reg
> 14,
16075 _("MVE vector register in the range [Q0..Q7] expected"));
16077 if (inst
.cond
> COND_ALWAYS
)
16078 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16080 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16082 if (inst
.instruction
== M_MNEM_vmlsdav
16083 || inst
.instruction
== M_MNEM_vmlsdava
16084 || inst
.instruction
== M_MNEM_vmlsdavx
16085 || inst
.instruction
== M_MNEM_vmlsdavax
)
16086 inst
.instruction
|= (et
.size
== 8) << 28;
16088 inst
.instruction
|= (et
.size
== 8) << 8;
16090 mve_encode_rqq (et
.type
== NT_unsigned
, 64);
16091 inst
.instruction
|= (et
.size
== 32) << 16;
16095 do_neon_qrdmlah (void)
16097 /* Check we're on the correct architecture. */
16098 if (!mark_feature_used (&fpu_neon_ext_armv8
))
16100 _("instruction form not available on this architecture.");
16101 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
16103 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
16104 record_feature_use (&fpu_neon_ext_v8_1
);
16107 if (inst
.operands
[2].isscalar
)
16109 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
16110 struct neon_type_el et
= neon_check_type (3, rs
,
16111 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16112 NEON_ENCODE (SCALAR
, inst
);
16113 neon_mul_mac (et
, neon_quad (rs
));
16117 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16118 struct neon_type_el et
= neon_check_type (3, rs
,
16119 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16120 NEON_ENCODE (INTEGER
, inst
);
16121 /* The U bit (rounding) comes from bit mask. */
16122 neon_three_same (neon_quad (rs
), 0, et
.size
);
16127 do_neon_fcmp_absolute (void)
16129 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16130 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16131 N_F_16_32
| N_KEY
);
16132 /* Size field comes from bit mask. */
16133 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
/* VACLE/VACLT are VACGE/VACGT with the operands swapped.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
16144 do_neon_step (void)
16146 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16147 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16148 N_F_16_32
| N_KEY
);
16149 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
16153 do_neon_abs_neg (void)
16155 enum neon_shape rs
;
16156 struct neon_type_el et
;
16158 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
16161 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16162 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
16164 if (check_simd_pred_availability (et
.type
== NT_float
,
16165 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
16168 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16169 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16170 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16171 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16172 inst
.instruction
|= neon_quad (rs
) << 6;
16173 inst
.instruction
|= (et
.type
== NT_float
) << 10;
16174 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16176 neon_dp_fixup (&inst
);
16182 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16183 struct neon_type_el et
= neon_check_type (2, rs
,
16184 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16185 int imm
= inst
.operands
[2].imm
;
16186 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
16187 _("immediate out of range for insert"));
16188 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
16194 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16195 struct neon_type_el et
= neon_check_type (2, rs
,
16196 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16197 int imm
= inst
.operands
[2].imm
;
16198 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16199 _("immediate out of range for insert"));
16200 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
16204 do_neon_qshlu_imm (void)
16206 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16207 struct neon_type_el et
= neon_check_type (2, rs
,
16208 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
16209 int imm
= inst
.operands
[2].imm
;
16210 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
16211 _("immediate out of range for shift"));
16212 /* Only encodes the 'U present' variant of the instruction.
16213 In this case, signed types have OP (bit 8) set to 0.
16214 Unsigned types have OP set to 1. */
16215 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
16216 /* The rest of the bits are the same as other immediate shifts. */
16217 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
16221 do_neon_qmovn (void)
16223 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16224 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
16225 /* Saturating move where operands can be signed or unsigned, and the
16226 destination has the same signedness. */
16227 NEON_ENCODE (INTEGER
, inst
);
16228 if (et
.type
== NT_unsigned
)
16229 inst
.instruction
|= 0xc0;
16231 inst
.instruction
|= 0x80;
16232 neon_two_same (0, 1, et
.size
/ 2);
16236 do_neon_qmovun (void)
16238 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16239 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
16240 /* Saturating move with unsigned results. Operands must be signed. */
16241 NEON_ENCODE (INTEGER
, inst
);
16242 neon_two_same (0, 1, et
.size
/ 2);
16246 do_neon_rshift_sat_narrow (void)
16248 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16249 or unsigned. If operands are unsigned, results must also be unsigned. */
16250 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16251 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
16252 int imm
= inst
.operands
[2].imm
;
16253 /* This gets the bounds check, size encoding and immediate bits calculation
16257 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
16258 VQMOVN.I<size> <Dd>, <Qm>. */
16261 inst
.operands
[2].present
= 0;
16262 inst
.instruction
= N_MNEM_vqmovn
;
16267 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16268 _("immediate out of range"));
16269 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
16273 do_neon_rshift_sat_narrow_u (void)
16275 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16276 or unsigned. If operands are unsigned, results must also be unsigned. */
16277 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16278 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
16279 int imm
= inst
.operands
[2].imm
;
16280 /* This gets the bounds check, size encoding and immediate bits calculation
16284 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
16285 VQMOVUN.I<size> <Dd>, <Qm>. */
16288 inst
.operands
[2].present
= 0;
16289 inst
.instruction
= N_MNEM_vqmovun
;
16294 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16295 _("immediate out of range"));
16296 /* FIXME: The manual is kind of unclear about what value U should have in
16297 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
16299 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
16303 do_neon_movn (void)
16305 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16306 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
16307 NEON_ENCODE (INTEGER
, inst
);
16308 neon_two_same (0, 1, et
.size
/ 2);
16312 do_neon_rshift_narrow (void)
16314 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16315 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
16316 int imm
= inst
.operands
[2].imm
;
16317 /* This gets the bounds check, size encoding and immediate bits calculation
16321 /* If immediate is zero then we are a pseudo-instruction for
16322 VMOVN.I<size> <Dd>, <Qm> */
16325 inst
.operands
[2].present
= 0;
16326 inst
.instruction
= N_MNEM_vmovn
;
16331 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16332 _("immediate out of range for narrowing operation"));
16333 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
16337 do_neon_shll (void)
16339 /* FIXME: Type checking when lengthening. */
16340 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
16341 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
16342 unsigned imm
= inst
.operands
[2].imm
;
16344 if (imm
== et
.size
)
16346 /* Maximum shift variant. */
16347 NEON_ENCODE (INTEGER
, inst
);
16348 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16349 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16350 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16351 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16352 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16354 neon_dp_fixup (&inst
);
16358 /* A more-specific type check for non-max versions. */
16359 et
= neon_check_type (2, NS_QDI
,
16360 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16361 NEON_ENCODE (IMMED
, inst
);
16362 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
16366 /* Check the various types for the VCVT instruction, and return which version
16367 the current instruction is. */
16369 #define CVT_FLAVOUR_VAR \
16370 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
16371 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
16372 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
16373 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
16374 /* Half-precision conversions. */ \
16375 CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
16376 CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
16377 CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
16378 CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
16379 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
16380 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
16381 /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
16382 Compared with single/double precision variants, only the co-processor \
16383 field is different, so the encoding flow is reused here. */ \
16384 CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
16385 CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
16386 CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
16387 CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
16388 /* VFP instructions. */ \
16389 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
16390 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
16391 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
16392 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
16393 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
16394 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
16395 /* VFP instructions with bitshift. */ \
16396 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
16397 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
16398 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
16399 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
16400 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
16401 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
16402 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
16403 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
16405 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
16406 neon_cvt_flavour_##C,
16408 /* The different types of conversions we can do. */
16409 enum neon_cvt_flavour
16412 neon_cvt_flavour_invalid
,
16413 neon_cvt_flavour_first_fp
= neon_cvt_flavour_f32_f64
16418 static enum neon_cvt_flavour
16419 get_neon_cvt_flavour (enum neon_shape rs
)
16421 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
16422 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
16423 if (et.type != NT_invtype) \
16425 inst.error = NULL; \
16426 return (neon_cvt_flavour_##C); \
16429 struct neon_type_el et
;
16430 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
16431 || rs
== NS_FF
) ? N_VFP
: 0;
16432 /* The instruction versions which take an immediate take one register
16433 argument, which is extended to the width of the full register. Thus the
16434 "source" and "destination" registers must have the same width. Hack that
16435 here by making the size equal to the key (wider, in this case) operand. */
16436 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
16440 return neon_cvt_flavour_invalid
;
16455 /* Neon-syntax VFP conversions. */
16458 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
16460 const char *opname
= 0;
16462 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
16463 || rs
== NS_FHI
|| rs
== NS_HFI
)
16465 /* Conversions with immediate bitshift. */
16466 const char *enc
[] =
16468 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
16474 if (flavour
< (int) ARRAY_SIZE (enc
))
16476 opname
= enc
[flavour
];
16477 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
16478 _("operands 0 and 1 must be the same register"));
16479 inst
.operands
[1] = inst
.operands
[2];
16480 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
16485 /* Conversions without bitshift. */
16486 const char *enc
[] =
16488 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
16494 if (flavour
< (int) ARRAY_SIZE (enc
))
16495 opname
= enc
[flavour
];
16499 do_vfp_nsyn_opcode (opname
);
16501 /* ARMv8.2 fp16 VCVT instruction. */
16502 if (flavour
== neon_cvt_flavour_s32_f16
16503 || flavour
== neon_cvt_flavour_u32_f16
16504 || flavour
== neon_cvt_flavour_f16_u32
16505 || flavour
== neon_cvt_flavour_f16_s32
)
16506 do_scalar_fp16_v82_encode ();
16510 do_vfp_nsyn_cvtz (void)
16512 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
16513 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16514 const char *enc
[] =
16516 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
16522 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
16523 do_vfp_nsyn_opcode (enc
[flavour
]);
16527 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
16528 enum neon_cvt_mode mode
)
16533 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16534 D register operands. */
16535 if (flavour
== neon_cvt_flavour_s32_f64
16536 || flavour
== neon_cvt_flavour_u32_f64
)
16537 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16540 if (flavour
== neon_cvt_flavour_s32_f16
16541 || flavour
== neon_cvt_flavour_u32_f16
)
16542 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
16545 set_pred_insn_type (OUTSIDE_PRED_INSN
);
16549 case neon_cvt_flavour_s32_f64
:
16553 case neon_cvt_flavour_s32_f32
:
16557 case neon_cvt_flavour_s32_f16
:
16561 case neon_cvt_flavour_u32_f64
:
16565 case neon_cvt_flavour_u32_f32
:
16569 case neon_cvt_flavour_u32_f16
:
16574 first_error (_("invalid instruction shape"));
16580 case neon_cvt_mode_a
: rm
= 0; break;
16581 case neon_cvt_mode_n
: rm
= 1; break;
16582 case neon_cvt_mode_p
: rm
= 2; break;
16583 case neon_cvt_mode_m
: rm
= 3; break;
16584 default: first_error (_("invalid rounding mode")); return;
16587 NEON_ENCODE (FPV8
, inst
);
16588 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
16589 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
16590 inst
.instruction
|= sz
<< 8;
16592 /* ARMv8.2 fp16 VCVT instruction. */
16593 if (flavour
== neon_cvt_flavour_s32_f16
16594 ||flavour
== neon_cvt_flavour_u32_f16
)
16595 do_scalar_fp16_v82_encode ();
16596 inst
.instruction
|= op
<< 7;
16597 inst
.instruction
|= rm
<< 16;
16598 inst
.instruction
|= 0xf0000000;
16599 inst
.is_neon
= TRUE
;
16603 do_neon_cvt_1 (enum neon_cvt_mode mode
)
16605 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
16606 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
16607 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
16609 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16611 if (flavour
== neon_cvt_flavour_invalid
)
16614 /* PR11109: Handle round-to-zero for VCVT conversions. */
16615 if (mode
== neon_cvt_mode_z
16616 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
16617 && (flavour
== neon_cvt_flavour_s16_f16
16618 || flavour
== neon_cvt_flavour_u16_f16
16619 || flavour
== neon_cvt_flavour_s32_f32
16620 || flavour
== neon_cvt_flavour_u32_f32
16621 || flavour
== neon_cvt_flavour_s32_f64
16622 || flavour
== neon_cvt_flavour_u32_f64
)
16623 && (rs
== NS_FD
|| rs
== NS_FF
))
16625 do_vfp_nsyn_cvtz ();
16629 /* ARMv8.2 fp16 VCVT conversions. */
16630 if (mode
== neon_cvt_mode_z
16631 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
16632 && (flavour
== neon_cvt_flavour_s32_f16
16633 || flavour
== neon_cvt_flavour_u32_f16
)
16636 do_vfp_nsyn_cvtz ();
16637 do_scalar_fp16_v82_encode ();
16641 /* VFP rather than Neon conversions. */
16642 if (flavour
>= neon_cvt_flavour_first_fp
)
16644 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16645 do_vfp_nsyn_cvt (rs
, flavour
);
16647 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16658 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
16659 0x0000100, 0x1000100, 0x0, 0x1000000};
16661 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16664 /* Fixed-point conversion with #0 immediate is encoded as an
16665 integer conversion. */
16666 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
16668 NEON_ENCODE (IMMED
, inst
);
16669 if (flavour
!= neon_cvt_flavour_invalid
)
16670 inst
.instruction
|= enctab
[flavour
];
16671 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16672 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16673 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16674 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16675 inst
.instruction
|= neon_quad (rs
) << 6;
16676 inst
.instruction
|= 1 << 21;
16677 if (flavour
< neon_cvt_flavour_s16_f16
)
16679 inst
.instruction
|= 1 << 21;
16680 immbits
= 32 - inst
.operands
[2].imm
;
16681 inst
.instruction
|= immbits
<< 16;
16685 inst
.instruction
|= 3 << 20;
16686 immbits
= 16 - inst
.operands
[2].imm
;
16687 inst
.instruction
|= immbits
<< 16;
16688 inst
.instruction
&= ~(1 << 9);
16691 neon_dp_fixup (&inst
);
16697 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
16699 NEON_ENCODE (FLOAT
, inst
);
16700 set_pred_insn_type (OUTSIDE_PRED_INSN
);
16702 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16705 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16706 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16707 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16708 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16709 inst
.instruction
|= neon_quad (rs
) << 6;
16710 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
16711 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
16712 inst
.instruction
|= mode
<< 8;
16713 if (flavour
== neon_cvt_flavour_u16_f16
16714 || flavour
== neon_cvt_flavour_s16_f16
)
16715 /* Mask off the original size bits and reencode them. */
16716 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
16719 inst
.instruction
|= 0xfc000000;
16721 inst
.instruction
|= 0xf0000000;
16727 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
16728 0x100, 0x180, 0x0, 0x080};
16730 NEON_ENCODE (INTEGER
, inst
);
16732 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16735 if (flavour
!= neon_cvt_flavour_invalid
)
16736 inst
.instruction
|= enctab
[flavour
];
16738 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16739 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16740 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16741 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16742 inst
.instruction
|= neon_quad (rs
) << 6;
16743 if (flavour
>= neon_cvt_flavour_s16_f16
16744 && flavour
<= neon_cvt_flavour_f16_u16
)
16745 /* Half precision. */
16746 inst
.instruction
|= 1 << 18;
16748 inst
.instruction
|= 2 << 18;
16750 neon_dp_fixup (&inst
);
16755 /* Half-precision conversions for Advanced SIMD -- neon. */
16758 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16762 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
16764 as_bad (_("operand size must match register width"));
16769 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
16771 as_bad (_("operand size must match register width"));
16776 inst
.instruction
= 0x3b60600;
16778 inst
.instruction
= 0x3b60700;
16780 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16781 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16782 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16783 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16784 neon_dp_fixup (&inst
);
16788 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
16789 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16790 do_vfp_nsyn_cvt (rs
, flavour
);
16792 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16797 do_neon_cvtr (void)
16799 do_neon_cvt_1 (neon_cvt_mode_x
);
16805 do_neon_cvt_1 (neon_cvt_mode_z
);
16809 do_neon_cvta (void)
16811 do_neon_cvt_1 (neon_cvt_mode_a
);
16815 do_neon_cvtn (void)
16817 do_neon_cvt_1 (neon_cvt_mode_n
);
16821 do_neon_cvtp (void)
16823 do_neon_cvt_1 (neon_cvt_mode_p
);
16827 do_neon_cvtm (void)
16829 do_neon_cvt_1 (neon_cvt_mode_m
);
16833 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
16836 mark_feature_used (&fpu_vfp_ext_armv8
);
16838 encode_arm_vfp_reg (inst
.operands
[0].reg
,
16839 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
16840 encode_arm_vfp_reg (inst
.operands
[1].reg
,
16841 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
16842 inst
.instruction
|= to
? 0x10000 : 0;
16843 inst
.instruction
|= t
? 0x80 : 0;
16844 inst
.instruction
|= is_double
? 0x100 : 0;
16845 do_vfp_cond_or_thumb ();
16849 do_neon_cvttb_1 (bfd_boolean t
)
16851 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
16852 NS_DF
, NS_DH
, NS_NULL
);
16856 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
16859 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
16861 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
16864 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
16866 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
16868 /* The VCVTB and VCVTT instructions with D-register operands
16869 don't work for SP only targets. */
16870 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16874 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
16876 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
16878 /* The VCVTB and VCVTT instructions with D-register operands
16879 don't work for SP only targets. */
16880 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16884 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
16891 do_neon_cvtb (void)
16893 do_neon_cvttb_1 (FALSE
);
16898 do_neon_cvtt (void)
16900 do_neon_cvttb_1 (TRUE
);
16904 neon_move_immediate (void)
16906 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
16907 struct neon_type_el et
= neon_check_type (2, rs
,
16908 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
16909 unsigned immlo
, immhi
= 0, immbits
;
16910 int op
, cmode
, float_p
;
16912 constraint (et
.type
== NT_invtype
,
16913 _("operand size must be specified for immediate VMOV"));
16915 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
16916 op
= (inst
.instruction
& (1 << 5)) != 0;
16918 immlo
= inst
.operands
[1].imm
;
16919 if (inst
.operands
[1].regisimm
)
16920 immhi
= inst
.operands
[1].reg
;
16922 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
16923 _("immediate has bits set outside the operand size"));
16925 float_p
= inst
.operands
[1].immisfloat
;
16927 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
16928 et
.size
, et
.type
)) == FAIL
)
16930 /* Invert relevant bits only. */
16931 neon_invert_size (&immlo
, &immhi
, et
.size
);
16932 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
16933 with one or the other; those cases are caught by
16934 neon_cmode_for_move_imm. */
16936 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
16937 &op
, et
.size
, et
.type
)) == FAIL
)
16939 first_error (_("immediate out of range"));
16944 inst
.instruction
&= ~(1 << 5);
16945 inst
.instruction
|= op
<< 5;
16947 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16948 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16949 inst
.instruction
|= neon_quad (rs
) << 6;
16950 inst
.instruction
|= cmode
<< 8;
16952 neon_write_immbits (immbits
);
16958 if (inst
.operands
[1].isreg
)
16960 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16962 NEON_ENCODE (INTEGER
, inst
);
16963 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16964 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16965 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16966 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16967 inst
.instruction
|= neon_quad (rs
) << 6;
16971 NEON_ENCODE (IMMED
, inst
);
16972 neon_move_immediate ();
16975 neon_dp_fixup (&inst
);
16978 /* Encode instructions of form:
16980 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16981 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16984 neon_mixed_length (struct neon_type_el et
, unsigned size
)
16986 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16987 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16988 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16989 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16990 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16991 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16992 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
16993 inst
.instruction
|= neon_logbits (size
) << 20;
16995 neon_dp_fixup (&inst
);
16999 do_neon_dyadic_long (void)
17001 enum neon_shape rs
= neon_select_shape (NS_QDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
17004 if (vfp_or_neon_is_neon (NEON_CHECK_ARCH
| NEON_CHECK_CC
) == FAIL
)
17007 NEON_ENCODE (INTEGER
, inst
);
17008 /* FIXME: Type checking for lengthening op. */
17009 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17010 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
17011 neon_mixed_length (et
, et
.size
);
17013 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
17014 && (inst
.cond
== 0xf || inst
.cond
== 0x10))
17016 /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
17017 in an IT block with le/lt conditions. */
17019 if (inst
.cond
== 0xf)
17021 else if (inst
.cond
== 0x10)
17024 inst
.pred_insn_type
= INSIDE_IT_INSN
;
17026 if (inst
.instruction
== N_MNEM_vaddl
)
17028 inst
.instruction
= N_MNEM_vadd
;
17029 do_neon_addsub_if_i ();
17031 else if (inst
.instruction
== N_MNEM_vsubl
)
17033 inst
.instruction
= N_MNEM_vsub
;
17034 do_neon_addsub_if_i ();
17036 else if (inst
.instruction
== N_MNEM_vabdl
)
17038 inst
.instruction
= N_MNEM_vabd
;
17039 do_neon_dyadic_if_su ();
17043 first_error (BAD_FPU
);
17047 do_neon_abal (void)
17049 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17050 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
17051 neon_mixed_length (et
, et
.size
);
17055 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
17057 if (inst
.operands
[2].isscalar
)
17059 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
17060 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
17061 NEON_ENCODE (SCALAR
, inst
);
17062 neon_mul_mac (et
, et
.type
== NT_unsigned
);
17066 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17067 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
17068 NEON_ENCODE (INTEGER
, inst
);
17069 neon_mixed_length (et
, et
.size
);
17074 do_neon_mac_maybe_scalar_long (void)
17076 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
/* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
   internal SCALAR.  QUAD_P is 1 if it's for Q format, otherwise it's 0.  */
static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      if (regno > 7 || elno > 3)
	goto bad_scalar;

      return ((regno & 0x7)
	      | ((elno & 0x1) << 3)
	      | (((elno >> 1) & 0x1) << 5));
    }
  else
    {
      if (regno > 15 || elno > 1)
	goto bad_scalar;

      return (((regno & 0x1) << 5)
	      | ((regno >> 1) & 0x7)
	      | ((elno & 0x1) << 3));
    }

bad_scalar:
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17113 do_neon_fmac_maybe_scalar_long (int subtype
)
17115 enum neon_shape rs
;
17117 /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
17118 field (bits[21:20]) has different meaning. For scalar index variant, it's
17119 used to differentiate add and subtract, otherwise it's with fixed value
17123 if (inst
.cond
!= COND_ALWAYS
)
17124 as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
17125 "behaviour is UNPREDICTABLE"));
17127 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16_fml
),
17130 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17133 /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
17134 be a scalar index register. */
17135 if (inst
.operands
[2].isscalar
)
17137 high8
= 0xfe000000;
17140 rs
= neon_select_shape (NS_DHS
, NS_QDS
, NS_NULL
);
17144 high8
= 0xfc000000;
17147 inst
.instruction
|= (0x1 << 23);
17148 rs
= neon_select_shape (NS_DHH
, NS_QDD
, NS_NULL
);
17151 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_F16
);
17153 /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
17154 the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
17155 so we simply pass -1 as size. */
17156 unsigned quad_p
= (rs
== NS_QDD
|| rs
== NS_QDS
);
17157 neon_three_same (quad_p
, 0, size
);
17159 /* Undo neon_dp_fixup. Redo the high eight bits. */
17160 inst
.instruction
&= 0x00ffffff;
17161 inst
.instruction
|= high8
;
17163 #define LOW1(R) ((R) & 0x1)
17164 #define HI4(R) (((R) >> 1) & 0xf)
17165 /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
17166 whether the instruction is in Q form and whether Vm is a scalar indexed
17168 if (inst
.operands
[2].isscalar
)
17171 = neon_scalar_for_fmac_fp16_long (inst
.operands
[2].reg
, quad_p
);
17172 inst
.instruction
&= 0xffffffd0;
17173 inst
.instruction
|= rm
;
17177 /* Redo Rn as well. */
17178 inst
.instruction
&= 0xfff0ff7f;
17179 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
17180 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
17185 /* Redo Rn and Rm. */
17186 inst
.instruction
&= 0xfff0ff50;
17187 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
17188 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
17189 inst
.instruction
|= HI4 (inst
.operands
[2].reg
);
17190 inst
.instruction
|= LOW1 (inst
.operands
[2].reg
) << 5;
static void
do_neon_vfmal (void)
{
  return do_neon_fmac_maybe_scalar_long (0);
}

static void
do_neon_vfmsl (void)
{
  return do_neon_fmac_maybe_scalar_long (1);
}
17207 do_neon_dyadic_wide (void)
17209 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
17210 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17211 neon_mixed_length (et
, et
.size
);
17215 do_neon_dyadic_narrow (void)
17217 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17218 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
17219 /* Operand sign is unimportant, and the U bit is part of the opcode,
17220 so force the operand type to integer. */
17221 et
.type
= NT_integer
;
17222 neon_mixed_length (et
, et
.size
/ 2);
17226 do_neon_mul_sat_scalar_long (void)
17228 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
17232 do_neon_vmull (void)
17234 if (inst
.operands
[2].isscalar
)
17235 do_neon_mac_maybe_scalar_long ();
17238 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17239 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
17241 if (et
.type
== NT_poly
)
17242 NEON_ENCODE (POLY
, inst
);
17244 NEON_ENCODE (INTEGER
, inst
);
17246 /* For polynomial encoding the U bit must be zero, and the size must
17247 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
17248 obviously, as 0b10). */
17251 /* Check we're on the correct architecture. */
17252 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
17254 _("Instruction form not available on this architecture.");
17259 neon_mixed_length (et
, et
.size
);
17266 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
17267 struct neon_type_el et
= neon_check_type (3, rs
,
17268 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
17269 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
17271 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
17272 _("shift out of range"));
17273 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17274 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17275 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17276 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17277 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17278 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17279 inst
.instruction
|= neon_quad (rs
) << 6;
17280 inst
.instruction
|= imm
<< 8;
17282 neon_dp_fixup (&inst
);
17288 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17289 struct neon_type_el et
= neon_check_type (2, rs
,
17290 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17291 unsigned op
= (inst
.instruction
>> 7) & 3;
17292 /* N (width of reversed regions) is encoded as part of the bitmask. We
17293 extract it here to check the elements to be reversed are smaller.
17294 Otherwise we'd get a reserved instruction. */
17295 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
17296 gas_assert (elsize
!= 0);
17297 constraint (et
.size
>= elsize
,
17298 _("elements must be smaller than reversal region"));
17299 neon_two_same (neon_quad (rs
), 1, et
.size
);
17305 if (inst
.operands
[1].isscalar
)
17307 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
17308 struct neon_type_el et
= neon_check_type (2, rs
,
17309 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17310 unsigned sizebits
= et
.size
>> 3;
17311 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17312 int logsize
= neon_logbits (et
.size
);
17313 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
17315 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
17318 NEON_ENCODE (SCALAR
, inst
);
17319 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17320 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17321 inst
.instruction
|= LOW4 (dm
);
17322 inst
.instruction
|= HI1 (dm
) << 5;
17323 inst
.instruction
|= neon_quad (rs
) << 6;
17324 inst
.instruction
|= x
<< 17;
17325 inst
.instruction
|= sizebits
<< 16;
17327 neon_dp_fixup (&inst
);
17331 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
17332 struct neon_type_el et
= neon_check_type (2, rs
,
17333 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17334 /* Duplicate ARM register to lanes of vector. */
17335 NEON_ENCODE (ARMREG
, inst
);
17338 case 8: inst
.instruction
|= 0x400000; break;
17339 case 16: inst
.instruction
|= 0x000020; break;
17340 case 32: inst
.instruction
|= 0x000000; break;
17343 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
17344 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
17345 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
17346 inst
.instruction
|= neon_quad (rs
) << 21;
17347 /* The encoding for this instruction is identical for the ARM and Thumb
17348 variants, except for the condition field. */
17349 do_vfp_cond_or_thumb ();
17353 /* VMOV has particularly many variations. It can be one of:
17354 0. VMOV<c><q> <Qd>, <Qm>
17355 1. VMOV<c><q> <Dd>, <Dm>
17356 (Register operations, which are VORR with Rm = Rn.)
17357 2. VMOV<c><q>.<dt> <Qd>, #<imm>
17358 3. VMOV<c><q>.<dt> <Dd>, #<imm>
17360 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
17361 (ARM register to scalar.)
17362 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
17363 (Two ARM registers to vector.)
17364 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
17365 (Scalar to ARM register.)
17366 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
17367 (Vector to two ARM registers.)
17368 8. VMOV.F32 <Sd>, <Sm>
17369 9. VMOV.F64 <Dd>, <Dm>
17370 (VFP register moves.)
17371 10. VMOV.F32 <Sd>, #imm
17372 11. VMOV.F64 <Dd>, #imm
17373 (VFP float immediate load.)
17374 12. VMOV <Rd>, <Sm>
17375 (VFP single to ARM reg.)
17376 13. VMOV <Sd>, <Rm>
17377 (ARM reg to VFP single.)
17378 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
17379 (Two ARM regs to two VFP singles.)
17380 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
17381 (Two VFP singles to two ARM regs.)
17383 These cases can be disambiguated using neon_select_shape, except cases 1/9
17384 and 3/11 which depend on the operand type too.
17386 All the encoded bits are hardcoded by this function.
17388 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
17389 Cases 5, 7 may be used with VFPv2 and above.
17391 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
17392 can specify a type where it doesn't make sense to, and is ignored). */
17397 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
17398 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
17399 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
17400 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
17401 struct neon_type_el et
;
17402 const char *ldconst
= 0;
17406 case NS_DD
: /* case 1/9. */
17407 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
17408 /* It is not an error here if no type is given. */
17410 if (et
.type
== NT_float
&& et
.size
== 64)
17412 do_vfp_nsyn_opcode ("fcpyd");
17415 /* fall through. */
17417 case NS_QQ
: /* case 0/1. */
17419 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17421 /* The architecture manual I have doesn't explicitly state which
17422 value the U bit should have for register->register moves, but
17423 the equivalent VORR instruction has U = 0, so do that. */
17424 inst
.instruction
= 0x0200110;
17425 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17426 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17427 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17428 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17429 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17430 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17431 inst
.instruction
|= neon_quad (rs
) << 6;
17433 neon_dp_fixup (&inst
);
17437 case NS_DI
: /* case 3/11. */
17438 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
17440 if (et
.type
== NT_float
&& et
.size
== 64)
17442 /* case 11 (fconstd). */
17443 ldconst
= "fconstd";
17444 goto encode_fconstd
;
17446 /* fall through. */
17448 case NS_QI
: /* case 2/3. */
17449 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17451 inst
.instruction
= 0x0800010;
17452 neon_move_immediate ();
17453 neon_dp_fixup (&inst
);
17456 case NS_SR
: /* case 4. */
17458 unsigned bcdebits
= 0;
17460 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
17461 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
17463 /* .<size> is optional here, defaulting to .32. */
17464 if (inst
.vectype
.elems
== 0
17465 && inst
.operands
[0].vectype
.type
== NT_invtype
17466 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17468 inst
.vectype
.el
[0].type
= NT_untyped
;
17469 inst
.vectype
.el
[0].size
= 32;
17470 inst
.vectype
.elems
= 1;
17473 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17474 logsize
= neon_logbits (et
.size
);
17476 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17478 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17479 && et
.size
!= 32, _(BAD_FPU
));
17480 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17481 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17485 case 8: bcdebits
= 0x8; break;
17486 case 16: bcdebits
= 0x1; break;
17487 case 32: bcdebits
= 0x0; break;
17491 bcdebits
|= x
<< logsize
;
17493 inst
.instruction
= 0xe000b10;
17494 do_vfp_cond_or_thumb ();
17495 inst
.instruction
|= LOW4 (dn
) << 16;
17496 inst
.instruction
|= HI1 (dn
) << 7;
17497 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17498 inst
.instruction
|= (bcdebits
& 3) << 5;
17499 inst
.instruction
|= (bcdebits
>> 2) << 21;
17503 case NS_DRR
: /* case 5 (fmdrr). */
17504 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17507 inst
.instruction
= 0xc400b10;
17508 do_vfp_cond_or_thumb ();
17509 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
17510 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
17511 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17512 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
17515 case NS_RS
: /* case 6. */
17518 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17519 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
17520 unsigned abcdebits
= 0;
17522 /* .<dt> is optional here, defaulting to .32. */
17523 if (inst
.vectype
.elems
== 0
17524 && inst
.operands
[0].vectype
.type
== NT_invtype
17525 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17527 inst
.vectype
.el
[0].type
= NT_untyped
;
17528 inst
.vectype
.el
[0].size
= 32;
17529 inst
.vectype
.elems
= 1;
17532 et
= neon_check_type (2, NS_NULL
,
17533 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
17534 logsize
= neon_logbits (et
.size
);
17536 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17538 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17539 && et
.size
!= 32, _(BAD_FPU
));
17540 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17541 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17545 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
17546 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
17547 case 32: abcdebits
= 0x00; break;
17551 abcdebits
|= x
<< logsize
;
17552 inst
.instruction
= 0xe100b10;
17553 do_vfp_cond_or_thumb ();
17554 inst
.instruction
|= LOW4 (dn
) << 16;
17555 inst
.instruction
|= HI1 (dn
) << 7;
17556 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17557 inst
.instruction
|= (abcdebits
& 3) << 5;
17558 inst
.instruction
|= (abcdebits
>> 2) << 21;
17562 case NS_RRD
: /* case 7 (fmrrd). */
17563 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17566 inst
.instruction
= 0xc500b10;
17567 do_vfp_cond_or_thumb ();
17568 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17569 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17570 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17571 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17574 case NS_FF
: /* case 8 (fcpys). */
17575 do_vfp_nsyn_opcode ("fcpys");
17579 case NS_FI
: /* case 10 (fconsts). */
17580 ldconst
= "fconsts";
17582 if (!inst
.operands
[1].immisfloat
)
17585 /* Immediate has to fit in 8 bits so float is enough. */
17586 float imm
= (float) inst
.operands
[1].imm
;
17587 memcpy (&new_imm
, &imm
, sizeof (float));
17588 /* But the assembly may have been written to provide an integer
17589 bit pattern that equates to a float, so check that the
17590 conversion has worked. */
17591 if (is_quarter_float (new_imm
))
17593 if (is_quarter_float (inst
.operands
[1].imm
))
17594 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
17596 inst
.operands
[1].imm
= new_imm
;
17597 inst
.operands
[1].immisfloat
= 1;
17601 if (is_quarter_float (inst
.operands
[1].imm
))
17603 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
17604 do_vfp_nsyn_opcode (ldconst
);
17606 /* ARMv8.2 fp16 vmov.f16 instruction. */
17608 do_scalar_fp16_v82_encode ();
17611 first_error (_("immediate out of range"));
17615 case NS_RF
: /* case 12 (fmrs). */
17616 do_vfp_nsyn_opcode ("fmrs");
17617 /* ARMv8.2 fp16 vmov.f16 instruction. */
17619 do_scalar_fp16_v82_encode ();
17623 case NS_FR
: /* case 13 (fmsr). */
17624 do_vfp_nsyn_opcode ("fmsr");
17625 /* ARMv8.2 fp16 vmov.f16 instruction. */
17627 do_scalar_fp16_v82_encode ();
17630 /* The encoders for the fmrrs and fmsrr instructions expect three operands
17631 (one of which is a list), but we have parsed four. Do some fiddling to
17632 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
17634 case NS_RRFF
: /* case 14 (fmrrs). */
17635 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
17636 _("VFP registers must be adjacent"));
17637 inst
.operands
[2].imm
= 2;
17638 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17639 do_vfp_nsyn_opcode ("fmrrs");
17642 case NS_FFRR
: /* case 15 (fmsrr). */
17643 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
17644 _("VFP registers must be adjacent"));
17645 inst
.operands
[1] = inst
.operands
[2];
17646 inst
.operands
[2] = inst
.operands
[3];
17647 inst
.operands
[0].imm
= 2;
17648 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17649 do_vfp_nsyn_opcode ("fmsrr");
17653 /* neon_select_shape has determined that the instruction
17654 shape is wrong and has already set the error message. */
17663 do_neon_rshift_round_imm (void)
17665 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
17666 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
17667 int imm
= inst
.operands
[2].imm
;
17669 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
17672 inst
.operands
[2].present
= 0;
17677 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
17678 _("immediate out of range for shift"));
17679 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
17684 do_neon_movhf (void)
17686 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
17687 constraint (rs
!= NS_HH
, _("invalid suffix"));
17689 if (inst
.cond
!= COND_ALWAYS
)
17693 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
17694 " the behaviour is UNPREDICTABLE"));
17698 inst
.error
= BAD_COND
;
17703 do_vfp_sp_monadic ();
17706 inst
.instruction
|= 0xf0000000;
17710 do_neon_movl (void)
17712 struct neon_type_el et
= neon_check_type (2, NS_QD
,
17713 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17714 unsigned sizebits
= et
.size
>> 3;
17715 inst
.instruction
|= sizebits
<< 19;
17716 neon_two_same (0, et
.type
== NT_unsigned
, -1);
17722 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17723 struct neon_type_el et
= neon_check_type (2, rs
,
17724 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17725 NEON_ENCODE (INTEGER
, inst
);
17726 neon_two_same (neon_quad (rs
), 1, et
.size
);
17730 do_neon_zip_uzp (void)
17732 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17733 struct neon_type_el et
= neon_check_type (2, rs
,
17734 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17735 if (rs
== NS_DD
&& et
.size
== 32)
17737 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
17738 inst
.instruction
= N_MNEM_vtrn
;
17742 neon_two_same (neon_quad (rs
), 1, et
.size
);
17746 do_neon_sat_abs_neg (void)
17748 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17749 struct neon_type_el et
= neon_check_type (2, rs
,
17750 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17751 neon_two_same (neon_quad (rs
), 1, et
.size
);
17755 do_neon_pair_long (void)
17757 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17758 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
17759 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
17760 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
17761 neon_two_same (neon_quad (rs
), 1, et
.size
);
17765 do_neon_recip_est (void)
17767 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17768 struct neon_type_el et
= neon_check_type (2, rs
,
17769 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
17770 inst
.instruction
|= (et
.type
== NT_float
) << 8;
17771 neon_two_same (neon_quad (rs
), 1, et
.size
);
17777 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17778 struct neon_type_el et
= neon_check_type (2, rs
,
17779 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17780 neon_two_same (neon_quad (rs
), 1, et
.size
);
17786 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17787 struct neon_type_el et
= neon_check_type (2, rs
,
17788 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
17789 neon_two_same (neon_quad (rs
), 1, et
.size
);
17795 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17796 struct neon_type_el et
= neon_check_type (2, rs
,
17797 N_EQK
| N_INT
, N_8
| N_KEY
);
17798 neon_two_same (neon_quad (rs
), 1, et
.size
);
17804 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17805 neon_two_same (neon_quad (rs
), 1, -1);
17809 do_neon_tbl_tbx (void)
17811 unsigned listlenbits
;
17812 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
17814 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
17816 first_error (_("bad list length for table lookup"));
17820 listlenbits
= inst
.operands
[1].imm
- 1;
17821 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17822 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17823 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17824 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17825 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17826 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17827 inst
.instruction
|= listlenbits
<< 8;
17829 neon_dp_fixup (&inst
);
17833 do_neon_ldm_stm (void)
17835 /* P, U and L bits are part of bitmask. */
17836 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
17837 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
17839 if (inst
.operands
[1].issingle
)
17841 do_vfp_nsyn_ldm_stm (is_dbmode
);
17845 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
17846 _("writeback (!) must be used for VLDMDB and VSTMDB"));
17848 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
17849 _("register list must contain at least 1 and at most 16 "
17852 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
17853 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
17854 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
17855 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
17857 inst
.instruction
|= offsetbits
;
17859 do_vfp_cond_or_thumb ();
17863 do_neon_ldr_str (void)
17865 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
17867 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
17868 And is UNPREDICTABLE in thumb mode. */
17870 && inst
.operands
[1].reg
== REG_PC
17871 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
17874 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17875 else if (warn_on_deprecated
)
17876 as_tsktsk (_("Use of PC here is deprecated"));
17879 if (inst
.operands
[0].issingle
)
17882 do_vfp_nsyn_opcode ("flds");
17884 do_vfp_nsyn_opcode ("fsts");
17886 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17887 if (inst
.vectype
.el
[0].size
== 16)
17888 do_scalar_fp16_v82_encode ();
17893 do_vfp_nsyn_opcode ("fldd");
17895 do_vfp_nsyn_opcode ("fstd");
17900 do_t_vldr_vstr_sysreg (void)
17902 int fp_vldr_bitno
= 20, sysreg_vldr_bitno
= 20;
17903 bfd_boolean is_vldr
= ((inst
.instruction
& (1 << fp_vldr_bitno
)) != 0);
17905 /* Use of PC is UNPREDICTABLE. */
17906 if (inst
.operands
[1].reg
== REG_PC
)
17907 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17909 if (inst
.operands
[1].immisreg
)
17910 inst
.error
= _("instruction does not accept register index");
17912 if (!inst
.operands
[1].isreg
)
17913 inst
.error
= _("instruction does not accept PC-relative addressing");
17915 if (abs (inst
.operands
[1].imm
) >= (1 << 7))
17916 inst
.error
= _("immediate value out of range");
17918 inst
.instruction
= 0xec000f80;
17920 inst
.instruction
|= 1 << sysreg_vldr_bitno
;
17921 encode_arm_cp_address (1, TRUE
, FALSE
, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
);
17922 inst
.instruction
|= (inst
.operands
[0].imm
& 0x7) << 13;
17923 inst
.instruction
|= (inst
.operands
[0].imm
& 0x8) << 19;
17927 do_vldr_vstr (void)
17929 bfd_boolean sysreg_op
= !inst
.operands
[0].isreg
;
17931 /* VLDR/VSTR (System Register). */
17934 if (!mark_feature_used (&arm_ext_v8_1m_main
))
17935 as_bad (_("Instruction not permitted on this architecture"));
17937 do_t_vldr_vstr_sysreg ();
17942 if (!mark_feature_used (&fpu_vfp_ext_v1xd
))
17943 as_bad (_("Instruction not permitted on this architecture"));
17944 do_neon_ldr_str ();
17948 /* "interleave" version also handles non-interleaving register VLD1/VST1
17952 do_neon_ld_st_interleave (void)
17954 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
17955 N_8
| N_16
| N_32
| N_64
);
17956 unsigned alignbits
= 0;
17958 /* The bits in this table go:
17959 0: register stride of one (0) or two (1)
17960 1,2: register list length, minus one (1, 2, 3, 4).
17961 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
17962 We use -1 for invalid entries. */
17963 const int typetable
[] =
17965 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
17966 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
17967 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
17968 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
17972 if (et
.type
== NT_invtype
)
17975 if (inst
.operands
[1].immisalign
)
17976 switch (inst
.operands
[1].imm
>> 8)
17978 case 64: alignbits
= 1; break;
17980 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
17981 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17982 goto bad_alignment
;
17986 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17987 goto bad_alignment
;
17992 first_error (_("bad alignment"));
17996 inst
.instruction
|= alignbits
<< 4;
17997 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17999 /* Bits [4:6] of the immediate in a list specifier encode register stride
18000 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
18001 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
18002 up the right value for "type" in a table based on this value and the given
18003 list style, then stick it back. */
18004 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
18005 | (((inst
.instruction
>> 8) & 3) << 3);
18007 typebits
= typetable
[idx
];
18009 constraint (typebits
== -1, _("bad list type for instruction"));
18010 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
18011 _("bad element type for instruction"));
18013 inst
.instruction
&= ~0xf00;
18014 inst
.instruction
|= typebits
<< 8;
18017 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
18018 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
18019 otherwise. The variable arguments are a list of pairs of legal (size, align)
18020 values, terminated with -1. */
18023 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
18026 int result
= FAIL
, thissize
, thisalign
;
18028 if (!inst
.operands
[1].immisalign
)
18034 va_start (ap
, do_alignment
);
18038 thissize
= va_arg (ap
, int);
18039 if (thissize
== -1)
18041 thisalign
= va_arg (ap
, int);
18043 if (size
== thissize
&& align
== thisalign
)
18046 while (result
!= SUCCESS
);
18050 if (result
== SUCCESS
)
18053 first_error (_("unsupported alignment for instruction"));
18059 do_neon_ld_st_lane (void)
18061 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
18062 int align_good
, do_alignment
= 0;
18063 int logsize
= neon_logbits (et
.size
);
18064 int align
= inst
.operands
[1].imm
>> 8;
18065 int n
= (inst
.instruction
>> 8) & 3;
18066 int max_el
= 64 / et
.size
;
18068 if (et
.type
== NT_invtype
)
18071 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
18072 _("bad list length"));
18073 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
18074 _("scalar index out of range"));
18075 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
18077 _("stride of 2 unavailable when element size is 8"));
18081 case 0: /* VLD1 / VST1. */
18082 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
18084 if (align_good
== FAIL
)
18088 unsigned alignbits
= 0;
18091 case 16: alignbits
= 0x1; break;
18092 case 32: alignbits
= 0x3; break;
18095 inst
.instruction
|= alignbits
<< 4;
18099 case 1: /* VLD2 / VST2. */
18100 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
18101 16, 32, 32, 64, -1);
18102 if (align_good
== FAIL
)
18105 inst
.instruction
|= 1 << 4;
18108 case 2: /* VLD3 / VST3. */
18109 constraint (inst
.operands
[1].immisalign
,
18110 _("can't use alignment with this instruction"));
18113 case 3: /* VLD4 / VST4. */
18114 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
18115 16, 64, 32, 64, 32, 128, -1);
18116 if (align_good
== FAIL
)
18120 unsigned alignbits
= 0;
18123 case 8: alignbits
= 0x1; break;
18124 case 16: alignbits
= 0x1; break;
18125 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
18128 inst
.instruction
|= alignbits
<< 4;
18135 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
18136 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18137 inst
.instruction
|= 1 << (4 + logsize
);
18139 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
18140 inst
.instruction
|= logsize
<< 10;
18143 /* Encode single n-element structure to all lanes VLD<n> instructions. */
18146 do_neon_ld_dup (void)
18148 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
18149 int align_good
, do_alignment
= 0;
18151 if (et
.type
== NT_invtype
)
18154 switch ((inst
.instruction
>> 8) & 3)
18156 case 0: /* VLD1. */
18157 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
18158 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
18159 &do_alignment
, 16, 16, 32, 32, -1);
18160 if (align_good
== FAIL
)
18162 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
18165 case 2: inst
.instruction
|= 1 << 5; break;
18166 default: first_error (_("bad list length")); return;
18168 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18171 case 1: /* VLD2. */
18172 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
18173 &do_alignment
, 8, 16, 16, 32, 32, 64,
18175 if (align_good
== FAIL
)
18177 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
18178 _("bad list length"));
18179 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18180 inst
.instruction
|= 1 << 5;
18181 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18184 case 2: /* VLD3. */
18185 constraint (inst
.operands
[1].immisalign
,
18186 _("can't use alignment with this instruction"));
18187 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
18188 _("bad list length"));
18189 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18190 inst
.instruction
|= 1 << 5;
18191 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18194 case 3: /* VLD4. */
18196 int align
= inst
.operands
[1].imm
>> 8;
18197 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
18198 16, 64, 32, 64, 32, 128, -1);
18199 if (align_good
== FAIL
)
18201 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
18202 _("bad list length"));
18203 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18204 inst
.instruction
|= 1 << 5;
18205 if (et
.size
== 32 && align
== 128)
18206 inst
.instruction
|= 0x3 << 6;
18208 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18215 inst
.instruction
|= do_alignment
<< 4;
18218 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
18219 apart from bits [11:4]. */
18222 do_neon_ldx_stx (void)
18224 if (inst
.operands
[1].isreg
)
18225 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
18227 switch (NEON_LANE (inst
.operands
[0].imm
))
18229 case NEON_INTERLEAVE_LANES
:
18230 NEON_ENCODE (INTERLV
, inst
);
18231 do_neon_ld_st_interleave ();
18234 case NEON_ALL_LANES
:
18235 NEON_ENCODE (DUP
, inst
);
18236 if (inst
.instruction
== N_INV
)
18238 first_error ("only loads support such operands");
18245 NEON_ENCODE (LANE
, inst
);
18246 do_neon_ld_st_lane ();
18249 /* L bit comes from bit mask. */
18250 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18251 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18252 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
18254 if (inst
.operands
[1].postind
)
18256 int postreg
= inst
.operands
[1].imm
& 0xf;
18257 constraint (!inst
.operands
[1].immisreg
,
18258 _("post-index must be a register"));
18259 constraint (postreg
== 0xd || postreg
== 0xf,
18260 _("bad register for post-index"));
18261 inst
.instruction
|= postreg
;
18265 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
18266 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
18267 || inst
.relocs
[0].exp
.X_add_number
!= 0,
18270 if (inst
.operands
[1].writeback
)
18272 inst
.instruction
|= 0xd;
18275 inst
.instruction
|= 0xf;
18279 inst
.instruction
|= 0xf9000000;
18281 inst
.instruction
|= 0xf4000000;
18286 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
18288 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18289 D register operands. */
18290 if (neon_shape_class
[rs
] == SC_DOUBLE
)
18291 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18294 NEON_ENCODE (FPV8
, inst
);
18296 if (rs
== NS_FFF
|| rs
== NS_HHH
)
18298 do_vfp_sp_dyadic ();
18300 /* ARMv8.2 fp16 instruction. */
18302 do_scalar_fp16_v82_encode ();
18305 do_vfp_dp_rd_rn_rm ();
18308 inst
.instruction
|= 0x100;
18310 inst
.instruction
|= 0xf0000000;
18316 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18318 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
18319 first_error (_("invalid instruction shape"));
18325 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18327 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
18330 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
18333 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
18337 do_vrint_1 (enum neon_cvt_mode mode
)
18339 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
18340 struct neon_type_el et
;
18345 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18346 D register operands. */
18347 if (neon_shape_class
[rs
] == SC_DOUBLE
)
18348 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18351 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
18353 if (et
.type
!= NT_invtype
)
18355 /* VFP encodings. */
18356 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
18357 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
18358 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18360 NEON_ENCODE (FPV8
, inst
);
18361 if (rs
== NS_FF
|| rs
== NS_HH
)
18362 do_vfp_sp_monadic ();
18364 do_vfp_dp_rd_rm ();
18368 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
18369 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
18370 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
18371 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
18372 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
18373 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
18374 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
18378 inst
.instruction
|= (rs
== NS_DD
) << 8;
18379 do_vfp_cond_or_thumb ();
18381 /* ARMv8.2 fp16 vrint instruction. */
18383 do_scalar_fp16_v82_encode ();
18387 /* Neon encodings (or something broken...). */
18389 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
18391 if (et
.type
== NT_invtype
)
18394 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18395 NEON_ENCODE (FLOAT
, inst
);
18397 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
18400 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18401 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18402 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18403 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18404 inst
.instruction
|= neon_quad (rs
) << 6;
18405 /* Mask off the original size bits and reencode them. */
18406 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
18407 | neon_logbits (et
.size
) << 18);
18411 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
18412 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
18413 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
18414 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
18415 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
18416 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
18417 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
18422 inst
.instruction
|= 0xfc000000;
18424 inst
.instruction
|= 0xf0000000;
18431 do_vrint_1 (neon_cvt_mode_x
);
18437 do_vrint_1 (neon_cvt_mode_z
);
18443 do_vrint_1 (neon_cvt_mode_r
);
18449 do_vrint_1 (neon_cvt_mode_a
);
18455 do_vrint_1 (neon_cvt_mode_n
);
18461 do_vrint_1 (neon_cvt_mode_p
);
18467 do_vrint_1 (neon_cvt_mode_m
);
/* Encode the indexed-scalar operand of a VCMLA instruction.  OPND is GAS's
   internal scalar encoding (register number plus element index), ELSIZE is
   the element size in bits (16 or 32).  Returns the instruction-format
   scalar field; on an out-of-range scalar the error is recorded via
   first_error () and 0 is returned.
   NOTE(review): reconstructed from a damaged extraction; the elsize==32
   branch's "return regno;" corresponds to a dropped source line -- confirm
   against upstream tc-arm.c.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  /* Half-precision: two elements per 32 bits, only D0-D15 addressable.  */
  if (elsize == 16 && elno < 2 && regno < 16)
    return regno | (elno << 4);
  /* Single-precision: only element 0 is encodable.  */
  else if (elsize == 32 && elno == 0)
    return regno;

  first_error (_("scalar out of range"));
  return 0;
}
/* Encoder for the Armv8.3-A complex multiply-accumulate (VCMLA) style
   instruction: checks the FPU feature, validates the rotation immediate
   (0/90/180/270), then emits either the indexed-scalar form (operand 2 is
   a scalar) or the three-same D/Q register form.
   NOTE(review): this text is a damaged extraction.  The function signature
   (presumably "static void do_vcmla (void)") and several interior lines --
   including, per the gap at source line 18495, a "rot /= 90;" scaling step
   and the brace/else lines of the two branches -- are missing.  Confirm
   against upstream gas/config/tc-arm.c before relying on this body.  */
18488 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18490 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18491 _("expression too complex"));
/* The rotation is carried in the first reloc's constant expression.  */
18492 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18493 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
18494 _("immediate out of range"));
/* Indexed-scalar form: operand 2 is Dm[x]; base opcode 0xfe000800.  */
18496 if (inst
.operands
[2].isscalar
)
18498 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
18499 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18500 N_KEY
| N_F16
| N_F32
).size
;
18501 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
18503 inst
.instruction
= 0xfe000800;
18504 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18505 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18506 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
18507 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
18508 inst
.instruction
|= LOW4 (m
);
18509 inst
.instruction
|= HI1 (m
) << 5;
18510 inst
.instruction
|= neon_quad (rs
) << 6;
18511 inst
.instruction
|= rot
<< 20;
18512 inst
.instruction
|= (size
== 32) << 23;
/* Vector (three-same) form: base opcode 0xfc200800, rotation in bits 23+.  */
18516 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18517 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18518 N_KEY
| N_F16
| N_F32
).size
;
18519 neon_three_same (neon_quad (rs
), 0, -1);
18520 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18521 inst
.instruction
|= 0xfc200800;
18522 inst
.instruction
|= rot
<< 23;
18523 inst
.instruction
|= (size
== 32) << 20;
/* Encoder for the Armv8.3-A complex add (VCADD) style instruction: the
   rotation immediate may only be 90 or 270; emits a three-same D/Q
   encoding with base opcode 0xfc800800, rot==270 selecting bit 24 and
   32-bit element size selecting bit 20.
   NOTE(review): damaged extraction -- the signature line (presumably
   "static void do_vcadd (void)") and brace lines are missing; confirm
   against upstream tc-arm.c.  */
18530 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18532 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18533 _("expression too complex"));
18534 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18535 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
18536 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18537 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18538 N_KEY
| N_F16
| N_F32
).size
;
18539 neon_three_same (neon_quad (rs
), 0, -1);
18540 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18541 inst
.instruction
|= 0xfc800800;
18542 inst
.instruction
|= (rot
== 270) << 24;
18543 inst
.instruction
|= (size
== 32) << 20;
18546 /* Dot Product instructions encoding support. */
/* Shared encoder for the VSDOT/VUDOT dot-product instructions.
   UNSIGNED_P selects the 'U' bit (1 for the unsigned variant).  Handles
   both the three-same D/Q register form (high byte 0xfc) and the indexed
   scalar form (high byte 0xfe), re-encoding the scalar operand afterwards
   because neon_three_same () emits GAS's internal scalar encoding.
   NOTE(review): damaged extraction -- the "static void" line, braces, the
   else of the isscalar test, and the declaration of high8 are missing from
   this text; confirm against upstream tc-arm.c.  */
18549 do_neon_dotproduct (int unsigned_p
)
18551 enum neon_shape rs
;
18552 unsigned scalar_oprd2
= 0;
/* Dot product ignores the condition field; warn rather than reject.  */
18555 if (inst
.cond
!= COND_ALWAYS
)
18556 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
18557 "is UNPREDICTABLE"));
18559 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18562 /* Dot Product instructions are in three-same D/Q register format or the third
18563 operand can be a scalar index register. */
18564 if (inst
.operands
[2].isscalar
)
18566 scalar_oprd2
= neon_scalar_for_mul (inst
.operands
[2].reg
, 32);
18567 high8
= 0xfe000000;
18568 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
18572 high8
= 0xfc000000;
18573 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
/* Element type check: U8 for the unsigned variant, S8 for the signed.  */
18577 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_U8
);
18579 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_S8
);
18581 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
18582 Product instruction, so we pass 0 as the "ubit" parameter. And the
18583 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
18584 neon_three_same (neon_quad (rs
), 0, 32);
18586 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
18587 different NEON three-same encoding. */
18588 inst
.instruction
&= 0x00ffffff;
18589 inst
.instruction
|= high8
;
18590 /* Encode 'U' bit which indicates signedness. */
18591 inst
.instruction
|= (unsigned_p
? 1 : 0) << 4;
18592 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
18593 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
18594 the instruction encoding. */
18595 if (inst
.operands
[2].isscalar
)
18597 inst
.instruction
&= 0xffffffd0;
18598 inst
.instruction
|= LOW4 (scalar_oprd2
);
18599 inst
.instruction
|= HI1 (scalar_oprd2
) << 5;
/* Dot Product instructions for signed integer (VSDOT).  Thin wrapper that
   selects the signed variant of the shared encoder.  */
static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}

/* Dot Product instructions for unsigned integer (VUDOT).  Thin wrapper
   that selects the unsigned variant of the shared encoder.  */
static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
18619 /* Crypto v1 instructions. */
/* Shared encoder for the two-operand crypto instructions (AES*, SHA1H,
   SHA1SU1, SHA256SU0).  ELTTYPE is the required element type, OP selects
   the minor opcode placed in bits 6+; OP == -1 appears to suppress that
   field.  Thumb vs ARM mode selects the high byte (0xfc vs 0xf0).
   NOTE(review): damaged extraction -- the "static void" line, braces, the
   "== NT_invtype" / early-return lines after the type check (gap at source
   lines 18626-18630), and the op != -1 guard are missing from this text;
   confirm against upstream tc-arm.c.  */
18621 do_crypto_2op_1 (unsigned elttype
, int op
)
18623 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18625 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
/* Encode Qd (bits 12-15, 22) and Qm (bits 0-3, 5).  */
18631 NEON_ENCODE (INTEGER
, inst
);
18632 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18633 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18634 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18635 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18637 inst
.instruction
|= op
<< 6;
/* High byte differs between Thumb (0xfc) and ARM (0xf0) encodings.  */
18640 inst
.instruction
|= 0xfc000000;
18642 inst
.instruction
|= 0xf0000000;
/* Shared encoder for the three-operand crypto instructions (SHA1C/P/M,
   SHA1SU0, SHA256H/H2/SU1).  U goes into the three-same "ubit"; OP scales
   the "size" argument as 8 << op.  Bails out if the operand type check
   fails.
   NOTE(review): damaged extraction -- the "static void" line, braces and
   the early-return after the NT_invtype test (gap at source lines
   18652-18655) are missing; confirm against upstream tc-arm.c.  */
18646 do_crypto_3op_1 (int u
, int op
)
18648 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18650 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
18651 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
18656 NEON_ENCODE (INTEGER
, inst
);
18657 neon_three_same (1, u
, 8 << op
);
/* One-line wrapper encoders for the individual crypto mnemonics, each
   forwarding fixed arguments to do_crypto_2op_1 / do_crypto_3op_1.
   NOTE(review): the extraction lost most of the wrapper signatures; only
   do_sha256su1 and do_sha256su0 are visible below.  By upstream layout the
   others are presumably do_aese/do_aesd/do_aesmc/do_aesimc (2op, N_8,
   ops 0-3), do_sha1c/do_sha1p/do_sha1m/do_sha1su0 (3op, u=0, ops 0-3),
   do_sha256h/do_sha256h2 (3op, u=1, ops 0-1) and do_sha1h/do_sha1su1
   (2op, N_32, ops -1/0) -- confirm against upstream tc-arm.c.  */
18663 do_crypto_2op_1 (N_8
, 0);
18669 do_crypto_2op_1 (N_8
, 1);
18675 do_crypto_2op_1 (N_8
, 2);
18681 do_crypto_2op_1 (N_8
, 3);
18687 do_crypto_3op_1 (0, 0);
18693 do_crypto_3op_1 (0, 1);
18699 do_crypto_3op_1 (0, 2);
18705 do_crypto_3op_1 (0, 3);
18711 do_crypto_3op_1 (1, 0);
18717 do_crypto_3op_1 (1, 1);
18721 do_sha256su1 (void)
18723 do_crypto_3op_1 (1, 2);
18729 do_crypto_2op_1 (N_32
, -1);
18735 do_crypto_2op_1 (N_32
, 0);
18739 do_sha256su0 (void)
18741 do_crypto_2op_1 (N_32
, 1);
18745 do_crc32_1 (unsigned int poly
, unsigned int sz
)
18747 unsigned int Rd
= inst
.operands
[0].reg
;
18748 unsigned int Rn
= inst
.operands
[1].reg
;
18749 unsigned int Rm
= inst
.operands
[2].reg
;
18751 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18752 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
18753 inst
.instruction
|= LOW4 (Rn
) << 16;
18754 inst
.instruction
|= LOW4 (Rm
);
18755 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
18756 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
18758 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
18759 as_warn (UNPRED_REG ("r15"));
/* Encoder for an Armv8.3 double-to-signed-32 convert (NS_FD, S32 <- F64):
   checks the VFP Armv8 feature, then reuses the SP/DP convert and
   cond-or-thumb helpers.
   NOTE(review): the signature was lost in extraction -- presumably
   "static void do_vjcvt (void)" (the Javascript convert, VJCVT); confirm
   against upstream tc-arm.c.  Note the large gap in source numbering
   before this span: lines 18760-18800 (the CRC32 wrappers in upstream)
   are missing from this extraction entirely.  */
18801 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18803 neon_check_type (2, NS_FD
, N_S32
, N_F64
);
18804 do_vfp_sp_dp_cvt ();
18805 do_vfp_cond_or_thumb ();
18809 /* Overall per-instruction processing. */
18811 /* We need to be able to fix up arbitrary expressions in some statements.
18812 This is so that we can handle symbols that are an arbitrary distance from
18813 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18814 which returns part of an address in a form which will be valid for
18815 a data instruction. We do this by pushing the expression into a symbol
18816 in the expr_section, and creating a fix for that. */
/* Create a fixup for an instruction, as described by the preceding file
   comment: constant expressions are first turned into an absolute-valued
   symbol so there is something to refer to in the object file, then either
   fix_new_exp or fix_new (via make_expr_symbol) is used, and finally the
   fix is tagged with the current thumb_mode so later relocation processing
   knows the instruction set.
   NOTE(review): damaged extraction -- the remaining parameters (where,
   size, exp, pc_rel, reloc at source lines 18820-18832), local
   declarations, and the switch/case structure around the two fix_new
   variants are missing from this text; confirm against upstream.  */
18819 fix_new_arm (fragS
* frag
,
18833 /* Create an absolute valued symbol, so we have something to
18834 refer to in the object file. Unfortunately for us, gas's
18835 generic expression parsing will already have folded out
18836 any use of .set foo/.type foo %function that may have
18837 been used to set type information of the target location,
18838 that's being specified symbolically. We have to presume
18839 the user knows what they are doing. */
18843 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
18845 symbol
= symbol_find_or_make (name
);
18846 S_SET_SEGMENT (symbol
, absolute_section
);
18847 symbol_set_frag (symbol
, &zero_address_frag
);
18848 S_SET_VALUE (symbol
, exp
->X_add_number
);
/* Rewrite the expression to refer to the new absolute symbol.  */
18849 exp
->X_op
= O_symbol
;
18850 exp
->X_add_symbol
= symbol
;
18851 exp
->X_add_number
= 0;
18857 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
18858 (enum bfd_reloc_code_real
) reloc
);
18862 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
18863 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
18867 /* Mark whether the fix is to a THUMB instruction, or an ARM
18869 new_fix
->tc_fix_data
= thumb_mode
;
18872 /* Create a frg for an instruction requiring relaxation. */
/* Emit a Thumb instruction that may need relaxation: extract symbol and
   offset from the first reloc's expression (symbol+offset, bare constant,
   or a general expression folded via make_expr_symbol), then allocate a
   variant frag sized INSN_SIZE/THUMB_SIZE and write the THUMB_SIZE
   narrow form.
   NOTE(review): damaged extraction -- the local declarations (to, sym,
   offset), the switch's case labels (O_symbol / O_constant / default) and
   braces are missing from this text; confirm against upstream.  */
18874 output_relax_insn (void)
18880 /* The size of the instruction is unknown, so tie the debug info to the
18881 start of the instruction. */
18882 dwarf2_emit_insn (0);
18884 switch (inst
.relocs
[0].exp
.X_op
)
18887 sym
= inst
.relocs
[0].exp
.X_add_symbol
;
18888 offset
= inst
.relocs
[0].exp
.X_add_number
;
18892 offset
= inst
.relocs
[0].exp
.X_add_number
;
18895 sym
= make_expr_symbol (&inst
.relocs
[0].exp
);
/* Reserve a relaxable frag and emit the narrow (Thumb) starting form.  */
18899 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
18900 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
18901 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
18904 /* Write a 32-bit thumb instruction to buf. */
18906 put_thumb32_insn (char * buf
, unsigned long insn
)
18908 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
18909 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
/* Write out the assembled instruction in inst: report any pending error,
   route relaxable instructions through output_relax_insn (), otherwise
   reserve inst.size bytes, record the current thumb mode in the frag (for
   NOP padding, see PR 9814), emit the bytes in the mode-appropriate
   layout, create fixups for every used reloc slot, and emit DWARF line
   info.
   NOTE(review): damaged extraction -- local declarations (to, r), braces,
   "return" statements and the conditions guarding the error/relax/size==0
   early exits are partially missing from this text; confirm against
   upstream tc-arm.c.  */
18913 output_inst (const char * str
)
18919 as_bad ("%s -- `%s'", inst
.error
, str
);
18924 output_relax_insn ();
18927 if (inst
.size
== 0)
18930 to
= frag_more (inst
.size
);
18931 /* PR 9814: Record the thumb mode into the current frag so that we know
18932 what type of NOP padding to use, if necessary. We override any previous
18933 setting so that if the mode has changed then the NOPS that we use will
18934 match the encoding of the last instruction in the frag. */
18935 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
/* 32-bit Thumb, double-size ARM (e.g. BL pairs) and plain encodings are
   laid out differently.  */
18937 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
18939 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
18940 put_thumb32_insn (to
, inst
.instruction
);
18942 else if (inst
.size
> INSN_SIZE
)
18944 gas_assert (inst
.size
== (2 * INSN_SIZE
));
18945 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
18946 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
18949 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
/* One fixup per used reloc slot.  */
18952 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
18954 if (inst
.relocs
[r
].type
!= BFD_RELOC_UNUSED
)
18955 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
18956 inst
.size
, & inst
.relocs
[r
].exp
, inst
.relocs
[r
].pc_rel
,
18957 inst
.relocs
[r
].type
);
18960 dwarf2_emit_insn (inst
.size
);
/* Emit (or re-emit) an IT instruction: base opcode 0xbf00, with MASK in
   the low bits and COND in bits 4-7.  When TO is null a new 2-byte slot is
   obtained from frag_more and DWARF info emitted; otherwise the bytes at
   TO are overwritten in place (used to patch up an automatically opened IT
   block as its mask grows).
   NOTE(review): damaged extraction -- the return type/"return to;" and the
   "if (to == NULL)" guard lines suggested by the numbering gaps are
   missing from this text; confirm against upstream tc-arm.c.  */
18964 output_it_inst (int cond
, int mask
, char * to
)
18966 unsigned long instruction
= 0xbf00;
18969 instruction
|= mask
;
18970 instruction
|= cond
<< 4;
18974 to
= frag_more (2);
18976 dwarf2_emit_insn (2);
18980 md_number_to_chars (to
, instruction
, 2);
18985 /* Tag values used in struct asm_opcode's tag field. */
/* Enumerators for struct asm_opcode's tag field, classifying how a
   mnemonic accepts its conditional affix (suffix, infix at index 3, odd
   positions, or not at all).  NOTE(review): the enum opener line
   (presumably "enum opcode_tag {") was lost in extraction.  */
18988 OT_unconditional
, /* Instruction cannot be conditionalized.
18989 The ARM condition field is still 0xE. */
18990 OT_unconditionalF
, /* Instruction cannot be conditionalized
18991 and carries 0xF in its ARM condition field. */
18992 OT_csuffix
, /* Instruction takes a conditional suffix. */
18993 OT_csuffixF
, /* Some forms of the instruction take a scalar
18994 conditional suffix, others place 0xF where the
18995 condition field would be, others take a vector
18996 conditional suffix. */
18997 OT_cinfix3
, /* Instruction takes a conditional infix,
18998 beginning at character index 3. (In
18999 unified mode, it becomes a suffix.) */
19000 OT_cinfix3_deprecated
, /* The same as OT_cinfix3. This is used for
19001 tsts, cmps, cmns, and teqs. */
19002 OT_cinfix3_legacy
, /* Legacy instruction takes a conditional infix at
19003 character index 3, even in unified mode. Used for
19004 legacy instructions where suffix and infix forms
19005 may be ambiguous. */
19006 OT_csuf_or_in3
, /* Instruction takes either a conditional
19007 suffix or an infix at character index 3. */
19008 OT_odd_infix_unc
, /* This is the unconditional variant of an
19009 instruction that takes a conditional infix
19010 at an unusual position. In unified mode,
19011 this variant will accept a suffix. */
19012 OT_odd_infix_0
/* Values greater than or equal to OT_odd_infix_0
19013 are the conditional variants of instructions that
19014 take conditional infixes in unusual positions.
19015 The infix appears at character index
19016 (tag - OT_odd_infix_0). These are not accepted
19017 in unified mode. */
19020 /* Subroutine of md_assemble, responsible for looking up the primary
19021 opcode from the mnemonic the user wrote. STR points to the
19022 beginning of the mnemonic.
19024 This is not simply a hash table lookup, because of conditional
19025 variants. Most instructions have conditional variants, which are
19026 expressed with a _conditional affix_ to the mnemonic. If we were
19027 to encode each conditional variant as a literal string in the opcode
19028 table, it would have approximately 20,000 entries.
19030 Most mnemonics take this affix as a suffix, and in unified syntax,
19031 'most' is upgraded to 'all'. However, in the divided syntax, some
19032 instructions take the affix as an infix, notably the s-variants of
19033 the arithmetic instructions. Of those instructions, all but six
19034 have the infix appear after the third character of the mnemonic.
19036 Accordingly, the algorithm for looking up primary opcodes given
19039 1. Look up the identifier in the opcode table.
19040 If we find a match, go to step U.
19042 2. Look up the last two characters of the identifier in the
19043 conditions table. If we find a match, look up the first N-2
19044 characters of the identifier in the opcode table. If we
19045 find a match, go to step CE.
19047 3. Look up the fourth and fifth characters of the identifier in
19048 the conditions table. If we find a match, extract those
19049 characters from the identifier, and look up the remaining
19050 characters in the opcode table. If we find a match, go
19055 U. Examine the tag field of the opcode structure, in case this is
19056 one of the six instructions with its conditional infix in an
19057 unusual place. If it is, the tag tells us where to find the
19058 infix; look it up in the conditions table and set inst.cond
19059 accordingly. Otherwise, this is an unconditional instruction.
19060 Again set inst.cond accordingly. Return the opcode structure.
19062 CE. Examine the tag field to make sure this is an instruction that
19063 should receive a conditional suffix. If it is not, fail.
19064 Otherwise, set inst.cond from the suffix we already looked up,
19065 and return the opcode structure.
19067 CM. Examine the tag field to make sure this is an instruction that
19068 should receive a conditional infix after the third character.
19069 If it is not, fail. Otherwise, undo the edits to the current
19070 line of input and proceed as for case CE. */
/* Look up the primary opcode for the mnemonic at *STR, implementing the
   algorithm in the preceding file comment: try the bare mnemonic, then a
   two-character conditional suffix, an MVE one-character vector-predicate
   suffix, and finally the infix-at-index-3 form; *STR is advanced past the
   mnemonic and any .w/.n width or Neon type suffix, and inst.cond is set
   from whichever affix matched.
   NOTE(review): damaged extraction -- local declarations (base, end,
   affix, offset, save[2]), most "return" statements, braces and several
   guard lines are missing from this text; confirm against upstream
   gas/config/tc-arm.c before relying on it.  */
19072 static const struct asm_opcode
*
19073 opcode_lookup (char **str
)
19077 const struct asm_opcode
*opcode
;
19078 const struct asm_cond
*cond
;
19081 /* Scan up to the end of the mnemonic, which must end in white space,
19082 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
19083 for (base
= end
= *str
; *end
!= '\0'; end
++)
19084 if (*end
== ' ' || *end
== '.')
19090 /* Handle a possible width suffix and/or Neon type suffix. */
19095 /* The .w and .n suffixes are only valid if the unified syntax is in
19097 if (unified_syntax
&& end
[1] == 'w')
19099 else if (unified_syntax
&& end
[1] == 'n')
19104 inst
.vectype
.elems
= 0;
19106 *str
= end
+ offset
;
19108 if (end
[offset
] == '.')
19110 /* See if we have a Neon type suffix (possible in either unified or
19111 non-unified ARM syntax mode). */
19112 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
19115 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
19121 /* Look for unaffixed or special-case affixed mnemonic. */
19122 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19127 if (opcode
->tag
< OT_odd_infix_0
)
19129 inst
.cond
= COND_ALWAYS
;
19133 if (warn_on_deprecated
&& unified_syntax
)
19134 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
19135 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
19136 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19139 inst
.cond
= cond
->value
;
/* MVE: try a one-character vector-predication suffix first.  */
19142 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
19144 /* Cannot have a conditional suffix on a mnemonic of less than a character.
19146 if (end
- base
< 2)
19149 cond
= (const struct asm_cond
*) hash_find_n (arm_vcond_hsh
, affix
, 1);
19150 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19152 /* If this opcode can not be vector predicated then don't accept it with a
19153 vector predication code. */
19154 if (opcode
&& !opcode
->mayBeVecPred
)
19157 if (!opcode
|| !cond
)
19159 /* Cannot have a conditional suffix on a mnemonic of less than two
19161 if (end
- base
< 3)
19164 /* Look for suffixed mnemonic. */
19166 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19167 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19171 if (opcode
&& cond
)
/* Step CE of the file-comment algorithm: validate the suffix against the
   opcode's tag.  */
19174 switch (opcode
->tag
)
19176 case OT_cinfix3_legacy
:
19177 /* Ignore conditional suffixes matched on infix only mnemonics. */
19181 case OT_cinfix3_deprecated
:
19182 case OT_odd_infix_unc
:
19183 if (!unified_syntax
)
19185 /* Fall through. */
19189 case OT_csuf_or_in3
:
19190 inst
.cond
= cond
->value
;
19193 case OT_unconditional
:
19194 case OT_unconditionalF
:
19196 inst
.cond
= cond
->value
;
19199 /* Delayed diagnostic. */
19200 inst
.error
= BAD_COND
;
19201 inst
.cond
= COND_ALWAYS
;
19210 /* Cannot have a usual-position infix on a mnemonic of less than
19211 six characters (five would be a suffix). */
19212 if (end
- base
< 6)
19215 /* Look for infixed mnemonic in the usual position. */
19217 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
/* Temporarily splice the two infix characters out of the mnemonic, look
   the remainder up, then restore the input line (step CM).  */
19221 memcpy (save
, affix
, 2);
19222 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
19223 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19225 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
19226 memcpy (affix
, save
, 2);
19229 && (opcode
->tag
== OT_cinfix3
19230 || opcode
->tag
== OT_cinfix3_deprecated
19231 || opcode
->tag
== OT_csuf_or_in3
19232 || opcode
->tag
== OT_cinfix3_legacy
))
19235 if (warn_on_deprecated
&& unified_syntax
19236 && (opcode
->tag
== OT_cinfix3
19237 || opcode
->tag
== OT_cinfix3_deprecated
))
19238 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
19240 inst
.cond
= cond
->value
;
19247 /* This function generates an initial IT instruction, leaving its block
19248 virtually open for the new instructions. Eventually,
19249 the mask will be updated by now_pred_add_mask () each time
19250 a new instruction needs to be included in the IT block.
19251 Finally, the block is closed with close_automatic_it_block ().
19252 The block closure can be requested either from md_assemble (),
19253 a tencode (), or due to a label hook. */
19256 new_automatic_it_block (int cond
)
19258 now_pred
.state
= AUTOMATIC_PRED_BLOCK
;
19259 now_pred
.mask
= 0x18;
19260 now_pred
.cc
= cond
;
19261 now_pred
.block_length
= 1;
19262 mapping_state (MAP_THUMB
);
19263 now_pred
.insn
= output_it_inst (cond
, now_pred
.mask
, NULL
);
19264 now_pred
.warn_deprecated
= FALSE
;
19265 now_pred
.insn_cond
= TRUE
;
19268 /* Close an automatic IT block.
19269 See comments in new_automatic_it_block (). */
19272 close_automatic_it_block (void)
19274 now_pred
.mask
= 0x10;
19275 now_pred
.block_length
= 0;
19278 /* Update the mask of the current automatically-generated IT
19279 instruction. See comments in new_automatic_it_block (). */
/* Extend the current automatically-generated IT instruction to cover one
   more instruction with condition COND: set the mask bit for the new slot
   from the low bit of COND, set the following bit to terminate the mask,
   and rewrite the IT instruction in place via output_it_inst ().
   NOTE(review): damaged extraction -- the middle arguments of the two
   SET_BIT_VALUE invocations (presumably resulting_bit and 1, at source
   lines 19291/19294) and the "#undef CLEAR_BIT" line are missing from this
   text; confirm against upstream tc-arm.c.  */
19282 now_pred_add_mask (int cond
)
19284 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
19285 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
19286 | ((bitvalue) << (nbit)))
19287 const int resulting_bit
= (cond
& 1);
19289 now_pred
.mask
&= 0xf;
19290 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
19292 (5 - now_pred
.block_length
));
19293 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
19295 ((5 - now_pred
.block_length
) - 1));
19296 output_it_inst (now_pred
.cc
, now_pred
.mask
, now_pred
.insn
);
19299 #undef SET_BIT_VALUE
19302 /* The IT blocks handling machinery is accessed through the these functions:
19303 it_fsm_pre_encode () from md_assemble ()
19304 set_pred_insn_type () optional, from the tencode functions
19305 set_pred_insn_type_last () ditto
19306 in_pred_block () ditto
19307 it_fsm_post_encode () from md_assemble ()
19308 force_automatic_it_block_close () from label handling functions
19311 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
19312 initializing the IT insn type with a generic initial value depending
19313 on the inst.condition.
19314 2) During the tencode function, two things may happen:
19315 a) The tencode function overrides the IT insn type by
19316 calling either set_pred_insn_type (type) or
19317 set_pred_insn_type_last ().
19318 b) The tencode function queries the IT block state by
19319 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
19321 Both set_pred_insn_type and in_pred_block run the internal FSM state
19322 handling function (handle_pred_state), because: a) setting the IT insn
19323 type may incur in an invalid state (exiting the function),
19324 and b) querying the state requires the FSM to be updated.
19325 Specifically we want to avoid creating an IT block for conditional
19326 branches, so it_fsm_pre_encode is actually a guess and we can't
19327 determine whether an IT block is required until the tencode () routine
19328 has decided what type of instruction this actually it.
19329 Because of this, if set_pred_insn_type and in_pred_block have to be
19330 used, set_pred_insn_type has to be called first.
19332 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
19333 that determines the insn IT type depending on the inst.cond code.
19334 When a tencode () routine encodes an instruction that can be
19335 either outside an IT block, or, in the case of being inside, has to be
19336 the last one, set_pred_insn_type_last () will determine the proper
19337 IT instruction type based on the inst.cond code. Otherwise,
19338 set_pred_insn_type can be called for overriding that logic or
19339 for covering other cases.
19341 Calling handle_pred_state () may not transition the IT block state to
19342 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
19343 still queried. Instead, if the FSM determines that the state should
19344 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
19345 after the tencode () function: that's what it_fsm_post_encode () does.
19347 Since in_pred_block () calls the state handling function to get an
19348 updated state, an error may occur (due to invalid insns combination).
19349 In that case, inst.error is set.
19350 Therefore, inst.error has to be checked after the execution of
19351 the tencode () routine.
19353 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
19354 any pending state change (if any) that didn't take place in
19355 handle_pred_state () as explained above. */
19358 it_fsm_pre_encode (void)
19360 if (inst
.cond
!= COND_ALWAYS
)
19361 inst
.pred_insn_type
= INSIDE_IT_INSN
;
19363 inst
.pred_insn_type
= OUTSIDE_PRED_INSN
;
19365 now_pred
.state_handled
= 0;
19368 /* IT state FSM handling function. */
19369 /* MVE instructions and non-MVE instructions are handled differently because of
19370 the introduction of VPT blocks.
19371 Specifications say that any non-MVE instruction inside a VPT block is
19372 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
19373 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
19374 few exceptions this will be handled at their respective handler functions.
19375 The error messages provided depending on the different combinations possible
19376 are described in the cases below:
19377 For 'most' MVE instructions:
19378 1) In an IT block, with an IT code: syntax error
19379 2) In an IT block, with a VPT code: error: must be in a VPT block
19380 3) In an IT block, with no code: warning: UNPREDICTABLE
19381 4) In a VPT block, with an IT code: syntax error
19382 5) In a VPT block, with a VPT code: OK!
19383 6) In a VPT block, with no code: error: missing code
19384 7) Outside a pred block, with an IT code: error: syntax error
19385 8) Outside a pred block, with a VPT code: error: should be in a VPT block
19386 9) Outside a pred block, with no code: OK!
19387 For non-MVE instructions:
19388 10) In an IT block, with an IT code: OK!
19389 11) In an IT block, with a VPT code: syntax error
19390 12) In an IT block, with no code: error: missing code
19391 13) In a VPT block, with an IT code: error: should be in an IT block
19392 14) In a VPT block, with a VPT code: syntax error
19393 15) In a VPT block, with no code: UNPREDICTABLE
19394 16) Outside a pred block, with an IT code: error: should be in an IT block
19395 17) Outside a pred block, with a VPT code: syntax error
19396 18) Outside a pred block, with no code: OK!
/* IT/VPT predication FSM, dispatching on now_pred.state and
   inst.pred_insn_type to implement the eighteen cases enumerated in the
   preceding file comment (diagnosing bad IT/VPT placement, opening and
   extending automatic IT blocks, and stepping through manual IT/VPT
   blocks).
   NOTE(review): damaged extraction -- braces, "break"/"return" statements,
   several case labels and condition lines are missing throughout (visible
   as gaps in the embedded source numbering); confirm the exact control
   flow against upstream gas/config/tc-arm.c before relying on it.  */
19401 handle_pred_state (void)
19403 now_pred
.state_handled
= 1;
19404 now_pred
.insn_cond
= FALSE
;
19406 switch (now_pred
.state
)
19408 case OUTSIDE_PRED_BLOCK
:
19409 switch (inst
.pred_insn_type
)
19411 case MVE_OUTSIDE_PRED_INSN
:
19412 if (inst
.cond
< COND_ALWAYS
)
19414 /* Case 7: Outside a pred block, with an IT code: error: syntax
19416 inst
.error
= BAD_SYNTAX
;
19419 /* Case 9: Outside a pred block, with no code: OK! */
19421 case OUTSIDE_PRED_INSN
:
19422 if (inst
.cond
> COND_ALWAYS
)
19424 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19426 inst
.error
= BAD_SYNTAX
;
19429 /* Case 18: Outside a pred block, with no code: OK! */
19432 case INSIDE_VPT_INSN
:
19433 /* Case 8: Outside a pred block, with a VPT code: error: should be in
19435 inst
.error
= BAD_OUT_VPT
;
19438 case INSIDE_IT_INSN
:
19439 case INSIDE_IT_LAST_INSN
:
19440 if (inst
.cond
< COND_ALWAYS
)
19442 /* Case 16: Outside a pred block, with an IT code: error: should
19443 be in an IT block. */
19444 if (thumb_mode
== 0)
19447 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
19448 as_tsktsk (_("Warning: conditional outside an IT block"\
19453 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
19454 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
19456 /* Automatically generate the IT instruction. */
19457 new_automatic_it_block (inst
.cond
);
19458 if (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
)
19459 close_automatic_it_block ();
19463 inst
.error
= BAD_OUT_IT
;
19469 else if (inst
.cond
> COND_ALWAYS
)
19471 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19473 inst
.error
= BAD_SYNTAX
;
19478 case IF_INSIDE_IT_LAST_INSN
:
19479 case NEUTRAL_IT_INSN
:
/* Opening a manual predication block (VPT or IT instruction seen).  */
19483 if (inst
.cond
!= COND_ALWAYS
)
19484 first_error (BAD_SYNTAX
);
19485 now_pred
.state
= MANUAL_PRED_BLOCK
;
19486 now_pred
.block_length
= 0;
19487 now_pred
.type
= VECTOR_PRED
;
19491 now_pred
.state
= MANUAL_PRED_BLOCK
;
19492 now_pred
.block_length
= 0;
19493 now_pred
.type
= SCALAR_PRED
;
19498 case AUTOMATIC_PRED_BLOCK
:
19499 /* Three things may happen now:
19500 a) We should increment current it block size;
19501 b) We should close current it block (closing insn or 4 insns);
19502 c) We should close current it block and start a new one (due
19503 to incompatible conditions or
19504 4 insns-length block reached). */
19506 switch (inst
.pred_insn_type
)
19508 case INSIDE_VPT_INSN
:
19510 case MVE_OUTSIDE_PRED_INSN
:
19512 case OUTSIDE_PRED_INSN
:
19513 /* The closure of the block shall happen immediately,
19514 so any in_pred_block () call reports the block as closed. */
19515 force_automatic_it_block_close ();
19518 case INSIDE_IT_INSN
:
19519 case INSIDE_IT_LAST_INSN
:
19520 case IF_INSIDE_IT_LAST_INSN
:
19521 now_pred
.block_length
++;
19523 if (now_pred
.block_length
> 4
19524 || !now_pred_compatible (inst
.cond
))
19526 force_automatic_it_block_close ();
19527 if (inst
.pred_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
19528 new_automatic_it_block (inst
.cond
);
19532 now_pred
.insn_cond
= TRUE
;
19533 now_pred_add_mask (inst
.cond
);
19536 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
19537 && (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
19538 || inst
.pred_insn_type
== IF_INSIDE_IT_LAST_INSN
))
19539 close_automatic_it_block ();
19542 case NEUTRAL_IT_INSN
:
19543 now_pred
.block_length
++;
19544 now_pred
.insn_cond
= TRUE
;
19546 if (now_pred
.block_length
> 4)
19547 force_automatic_it_block_close ();
19549 now_pred_add_mask (now_pred
.cc
& 1);
19553 close_automatic_it_block ();
19554 now_pred
.state
= MANUAL_PRED_BLOCK
;
19559 case MANUAL_PRED_BLOCK
:
/* Step the manual block: derive this slot's expected condition from the
   mask (scalar IT vs vector VPT differ), shift the mask, and note whether
   this is the last slot.  */
19562 if (now_pred
.type
== SCALAR_PRED
)
19564 /* Check conditional suffixes. */
19565 cond
= now_pred
.cc
^ ((now_pred
.mask
>> 4) & 1) ^ 1;
19566 now_pred
.mask
<<= 1;
19567 now_pred
.mask
&= 0x1f;
19568 is_last
= (now_pred
.mask
== 0x10);
19572 now_pred
.cc
^= (now_pred
.mask
>> 4);
19573 cond
= now_pred
.cc
+ 0xf;
19574 now_pred
.mask
<<= 1;
19575 now_pred
.mask
&= 0x1f;
19576 is_last
= now_pred
.mask
== 0x10;
19578 now_pred
.insn_cond
= TRUE
;
19580 switch (inst
.pred_insn_type
)
19582 case OUTSIDE_PRED_INSN
:
19583 if (now_pred
.type
== SCALAR_PRED
)
19585 if (inst
.cond
== COND_ALWAYS
)
19587 /* Case 12: In an IT block, with no code: error: missing
19589 inst
.error
= BAD_NOT_IT
;
19592 else if (inst
.cond
> COND_ALWAYS
)
19594 /* Case 11: In an IT block, with a VPT code: syntax error.
19596 inst
.error
= BAD_SYNTAX
;
19599 else if (thumb_mode
)
19601 /* This is for some special cases where a non-MVE
19602 instruction is not allowed in an IT block, such as cbz,
19603 but are put into one with a condition code.
19604 You could argue this should be a syntax error, but we
19605 gave the 'not allowed in IT block' diagnostic in the
19606 past so we will keep doing so. */
19607 inst
.error
= BAD_NOT_IT
;
19614 /* Case 15: In a VPT block, with no code: UNPREDICTABLE. */
19615 as_tsktsk (MVE_NOT_VPT
);
19618 case MVE_OUTSIDE_PRED_INSN
:
19619 if (now_pred
.type
== SCALAR_PRED
)
19621 if (inst
.cond
== COND_ALWAYS
)
19623 /* Case 3: In an IT block, with no code: warning:
19625 as_tsktsk (MVE_NOT_IT
);
19628 else if (inst
.cond
< COND_ALWAYS
)
19630 /* Case 1: In an IT block, with an IT code: syntax error.
19632 inst
.error
= BAD_SYNTAX
;
19640 if (inst
.cond
< COND_ALWAYS
)
19642 /* Case 4: In a VPT block, with an IT code: syntax error.
19644 inst
.error
= BAD_SYNTAX
;
19647 else if (inst
.cond
== COND_ALWAYS
)
19649 /* Case 6: In a VPT block, with no code: error: missing
19651 inst
.error
= BAD_NOT_VPT
;
19659 case INSIDE_IT_INSN
:
19660 if (inst
.cond
> COND_ALWAYS
)
19662 /* Case 11: In an IT block, with a VPT code: syntax error. */
19663 /* Case 14: In a VPT block, with a VPT code: syntax error. */
19664 inst
.error
= BAD_SYNTAX
;
19667 else if (now_pred
.type
== SCALAR_PRED
)
19669 /* Case 10: In an IT block, with an IT code: OK! */
19670 if (cond
!= inst
.cond
)
19672 inst
.error
= now_pred
.type
== SCALAR_PRED
? BAD_IT_COND
:
19679 /* Case 13: In a VPT block, with an IT code: error: should be
19681 inst
.error
= BAD_OUT_IT
;
19686 case INSIDE_VPT_INSN
:
19687 if (now_pred
.type
== SCALAR_PRED
)
19689 /* Case 2: In an IT block, with a VPT code: error: must be in a
19691 inst
.error
= BAD_OUT_VPT
;
19694 /* Case 5: In a VPT block, with a VPT code: OK! */
19695 else if (cond
!= inst
.cond
)
19697 inst
.error
= BAD_VPT_COND
;
19701 case INSIDE_IT_LAST_INSN
:
19702 case IF_INSIDE_IT_LAST_INSN
:
19703 if (now_pred
.type
== VECTOR_PRED
|| inst
.cond
> COND_ALWAYS
)
19705 /* Case 4: In a VPT block, with an IT code: syntax error. */
19706 /* Case 11: In an IT block, with a VPT code: syntax error. */
19707 inst
.error
= BAD_SYNTAX
;
19710 else if (cond
!= inst
.cond
)
19712 inst
.error
= BAD_IT_COND
;
19717 inst
.error
= BAD_BRANCH
;
19722 case NEUTRAL_IT_INSN
:
19723 /* The BKPT instruction is unconditional even in a IT or VPT
19728 if (now_pred
.type
== SCALAR_PRED
)
19730 inst
.error
= BAD_IT_IT
;
19733 /* fall through. */
19735 if (inst
.cond
== COND_ALWAYS
)
19737 /* Executing a VPT/VPST instruction inside an IT block or a
19738 VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
19740 if (now_pred
.type
== SCALAR_PRED
)
19741 as_tsktsk (MVE_NOT_IT
);
19743 as_tsktsk (MVE_NOT_VPT
);
19748 /* VPT/VPST do not accept condition codes. */
19749 inst
.error
= BAD_SYNTAX
;
/* One class of 16-bit Thumb instructions that is performance-deprecated
   inside an IT block on ARMv8-A/ARMv8-R.  An instruction matches the class
   when (insn & mask) == pattern; DESCRIPTION names the class in the
   diagnostic printed by it_fsm_post_encode.  */
struct depr_insn_mask
{
  unsigned long pattern;
  unsigned long mask;
  const char* description;
};
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  The list is terminated by an all-zero sentinel entry: the
   lookup loop in it_fsm_post_encode stops when mask == 0.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
/* Per-instruction postamble for the IT/VPT predication state machine.
   Runs the deferred state handler if the encoder did not, emits the
   ARMv8-A/R performance-deprecation warnings for conditional instructions
   inside an IT block, and closes the block once its last slot has been
   consumed (now_pred.mask == 0x10 marks the final slot).  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_pred.state_handled)
    handle_pred_state ();

  /* The deprecation warnings only apply to ARMv8-A/R (v8 but not M
     profile), are emitted at most once per block, and only for
     instructions that actually carry the block's condition.  */
  if (now_pred.insn_cond
      && !now_pred.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
    {
      if (inst.instruction >= 0x10000)
	{
	  /* Any 32-bit Thumb instruction in an IT block is deprecated.  */
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "performance deprecated in ARMv8-A and ARMv8-R"));
	  now_pred.warn_deprecated = TRUE;
	}
      else
	{
	  /* 16-bit instruction: check it against the deprecated classes.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
			       "instructions of the following class are "
			       "performance deprecated in ARMv8-A and "
			       "ARMv8-R: %s"), p->description);
		  now_pred.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_pred.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are performance deprecated in ARMv8-A and "
		       "ARMv8-R"));
	  now_pred.warn_deprecated = TRUE;
	}
    }

  /* A mask of 0x10 means every condition slot of the block has been
     used; leave the block.  */
  is_last = (now_pred.mask == 0x10);
  if (is_last)
    {
      now_pred.state = OUTSIDE_PRED_BLOCK;
      now_pred.mask = 0;
    }
}
19840 force_automatic_it_block_close (void)
19842 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
)
19844 close_automatic_it_block ();
19845 now_pred
.state
= OUTSIDE_PRED_BLOCK
;
/* Return non-zero if assembly is currently inside an IT or VPT block,
   i.e. the predication FSM is in any state other than
   OUTSIDE_PRED_BLOCK.  The deferred state handler is run first so the
   answer reflects the most recently processed instruction.  */
static int
in_pred_block (void)
{
  if (!now_pred.state_handled)
    handle_pred_state ();

  return now_pred.state != OUTSIDE_PRED_BLOCK;
}
/* Whether OPCODE only has T32 encoding.  Since this function is only used by
   t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
   here, hence the "known" in the function name.  */
static bfd_boolean
known_t32_only_insn (const struct asm_opcode *opcode)
{
  /* Original Thumb-1 wide instruction.  */
  if (opcode->tencode == do_t_blx
      || opcode->tencode == do_t_branch23
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
    return TRUE;

  /* Wide-only instruction added to ARMv8-M Baseline.  */
  if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
    return TRUE;

  return FALSE;
}
/* Whether wide instruction variant can be used if available for a valid OPCODE
   in ARCH.  */
static bfd_boolean
t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
{
  if (known_t32_only_insn (opcode))
    return TRUE;

  /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
     of variant T3 of B.W is checked in do_t_branch.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_branch)
    return TRUE;

  /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_mov_cmp
      /* Make sure CMP instruction is not affected.  */
      && opcode->aencode == do_mov)
    return TRUE;

  /* Wide instruction variants of all instructions with narrow *and* wide
     variants become available with ARMv6t2.  Other opcodes are either
     narrow-only or wide-only and are thus available if OPCODE is valid.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
    return TRUE;

  /* OPCODE with narrow only instruction variant or wide variant not
     available.  */
  return FALSE;
}
/* Main entry point for assembling one source line STR.  Looks the mnemonic
   up, parses the operands, runs the Thumb or ARM encoder, and records which
   architecture features the instruction used.  Errors are reported through
   as_bad; the encoded bytes are emitted via output_inst.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;
  int r;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  memset (&inst, '\0', sizeof (inst));
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
/* Warn if any section (ELF) or the file (non-ELF) ends with a
   manually-opened IT or VPT/VPST block that was never completed.  */
static void
check_pred_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_pred.state
	== MANUAL_PRED_BLOCK)
      {
	if (now_pred.type == SCALAR_PRED)
	  as_warn (_("section '%s' finished with an open IT block."),
		   sect->name);
	else
	  as_warn (_("section '%s' finished with an open VPT/VPST block."),
		   sect->name);
      }
#else
  if (now_pred.state == MANUAL_PRED_BLOCK)
    {
      if (now_pred.type == SCALAR_PRED)
	as_warn (_("file finished with an open IT block."));
      else
	as_warn (_("file finished with an open VPT/VPST block."));
    }
#endif
}
/* Various frobbings of labels and their addresses.  */

/* Start-of-line hook: forget the label remembered from the previous
   line.  md_assemble only re-anchors last_label_seen when it is
   non-NULL, so this limits that fixup to labels on the current line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
/* Called for every label definition: remember it for md_assemble's
   alignment fixup, tag it with the current instruction set (and
   interworking mode), close any automatic IT block, and mark
   non-local labels as Thumb functions when requested.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Laaa:	.word .Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */
      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
20207 arm_data_in_code (void)
20209 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
20211 *input_line_pointer
= '/';
20212 input_line_pointer
+= 5;
20213 *input_line_pointer
= 0;
20221 arm_canonicalize_symbol_name (char * name
)
20225 if (thumb_mode
&& (len
= strlen (name
)) > 5
20226 && streq (name
+ len
- 5, "/data"))
20227 *(name
+ len
- 5) = 0;
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.	Some registers
   also have mixed-case names.	*/

/* REGDEF builds one entry: name string, encoded number, register type,
   built-in flag (TRUE = not user-defined), Neon shape info (0 here).
   REGNUM pastes the number onto the prefix; REGNUM2 doubles it (Q
   registers map onto pairs of D registers).  SPLRBANK builds the
   LR/SP/SPSR trio for one banked-register group.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.	 */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};

#undef REGDEF
#undef REGNUM
#undef REGSET
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every permutation of the f/s/x/c field
   letters is listed so MSR operands can be matched directly.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"s",	   PSR_s},
  {"x",	   PSR_x},
  {"c",	   PSR_c},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
20461 /* Table of V7M psr names. */
20462 static const struct asm_psr v7m_psrs
[] =
20464 {"apsr", 0x0 }, {"APSR", 0x0 },
20465 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
20466 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
20467 {"psr", 0x3 }, {"PSR", 0x3 },
20468 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
20469 {"ipsr", 0x5 }, {"IPSR", 0x5 },
20470 {"epsr", 0x6 }, {"EPSR", 0x6 },
20471 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
20472 {"msp", 0x8 }, {"MSP", 0x8 },
20473 {"psp", 0x9 }, {"PSP", 0x9 },
20474 {"msplim", 0xa }, {"MSPLIM", 0xa },
20475 {"psplim", 0xb }, {"PSPLIM", 0xb },
20476 {"primask", 0x10}, {"PRIMASK", 0x10},
20477 {"basepri", 0x11}, {"BASEPRI", 0x11},
20478 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
20479 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
20480 {"control", 0x14}, {"CONTROL", 0x14},
20481 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
20482 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
20483 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
20484 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
20485 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
20486 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
20487 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
20488 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
20489 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
/* Table of all shift-in-operand names, in lower and upper case.
   "asl" is accepted as a synonym for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
/* Table of all explicit relocation names, each in lower and upper case.
   Fix: the uppercase alias of the FDPIC gottpoff relocation was
   misspelled "GOTTPOFF_FDIC" (missing 'P'), breaking the table's
   invariant that every name has a matching upper/lowercase pair and
   making the uppercase spelling unusable; corrected to
   "GOTTPOFF_FDPIC".  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
	{ "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
	{ "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
	{ "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
  { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },      { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },    { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },   { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
/* Table of all conditional affixes.  Values are the standard ARM
   condition-code encodings (0x0 = EQ ... 0xe = AL); "hs"/"ul"/"lo"
   are accepted synonyms for "cs"/"cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
/* Condition affixes used by vector (MVE VPT-block) instructions: "t"
   (then) and "e" (else).  Values continue past the scalar condition
   codes above.  */
static const struct asm_cond vconds[] =
{
  {"t",  0xf},
  {"e",  0x10}
};
/* UL_BARRIER expands to a lower- and an uppercase entry for one barrier
   option; FEAT is the core feature bit that gates its availability.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of DSB/DMB/ISB barrier option names and their 4-bit encodings.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
20588 /* Table of ARM-format instructions. */
20590 /* Macros for gluing together operand strings. N.B. In all cases
20591 other than OPS0, the trailing OP_stop comes from default
20592 zero-initialization of the unspecified elements of the array. */
20593 #define OPS0() { OP_stop, }
20594 #define OPS1(a) { OP_##a, }
20595 #define OPS2(a,b) { OP_##a,OP_##b, }
20596 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
20597 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
20598 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
20599 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
20601 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
20602 This is useful when mixing operands for ARM and THUMB, i.e. using the
20603 MIX_ARM_THUMB_OPERANDS macro.
20604 In order to use these macros, prefix the number of operands with _
20606 #define OPS_1(a) { a, }
20607 #define OPS_2(a,b) { a,b, }
20608 #define OPS_3(a,b,c) { a,b,c, }
20609 #define OPS_4(a,b,c,d) { a,b,c,d, }
20610 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
20611 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
20613 /* These macros abstract out the exact format of the mnemonic table and
20614 save some repeated characters. */
20616 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
20617 #define TxCE(mnem, op, top, nops, ops, ae, te) \
20618 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
20619 THUMB_VARIANT, do_##ae, do_##te, 0 }
20621 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
20622 a T_MNEM_xyz enumerator. */
20623 #define TCE(mnem, aop, top, nops, ops, ae, te) \
20624 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
20625 #define tCE(mnem, aop, top, nops, ops, ae, te) \
20626 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
20628 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
20629 infix after the third character. */
20630 #define TxC3(mnem, op, top, nops, ops, ae, te) \
20631 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
20632 THUMB_VARIANT, do_##ae, do_##te, 0 }
20633 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
20634 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
20635 THUMB_VARIANT, do_##ae, do_##te, 0 }
20636 #define TC3(mnem, aop, top, nops, ops, ae, te) \
20637 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
20638 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
20639 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
20640 #define tC3(mnem, aop, top, nops, ops, ae, te) \
20641 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
20642 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
20643 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* ARM-only variants of all the above.  Thumb opcode is 0 and the Thumb
   encoder is NULL.  Note the asymmetry: CE expects MNEM to already be a
   string literal, while C3 stringizes its argument (callers pass a bare
   identifier, e.g. C3 (adrl, ...)).  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
/* Thumb-only variants of TCE and TUE.  ARM opcode is 0 and the ARM
   encoder is NULL; only the Thumb encoding function do_##te is set.
   The extracted copy of these four macros had lost the final
   continuation line of each definition; restored per upstream
   binutils gas/config/tc-arm.c.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  NOTE(review): upstream sets the
   trailing mve_p field to 1 here (these mnemonics may be MVE-predicated);
   confirm against the binutils revision this file tracks.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 1 }
/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2: the
   Thumb-2 opcode is the ARM one with the 0xE condition nibble pasted in
   front (0xe##op), and both modes share the same encoder.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
/* One entry of a conditional-infix mnemonic family: the full name is the
   concatenation M1 <cond> M3.  When M2 is empty (sizeof (#m2) == 1, i.e.
   just the NUL) the entry is the bare unconditional spelling; otherwise the
   infix starts at offset sizeof (m1) - 1 within the name.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expand to one table entry per condition code (plus the bare form) for a
   mnemonic whose condition infix sits at an odd position.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)
/* ARM-only unconditional mnemonics (stringized name, no Thumb encoder).
   UE leaves the condition field 0xE; UF bears 0xF.  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }
/* Neon insn with conditional suffix for the ARM version, non-overloaded
   variants.  TAG selects the OT_* suffix style and MVE_P marks entries that
   may be MVE-predicated.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc) \
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc) \
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc) \
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc) \
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)
/* MVE instruction using an M_MNEM_xyz enumerator opcode; always marked
   predicable (trailing mve_p field is 1).  */
#define mCEF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }


/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc) \
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc) \
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }
/* ToC but for potentially MVE predicated instructions.  The extracted copy
   had lost the final continuation line (the encoder reference and the mve_p
   field); restored per upstream binutils gas/config/tc-arm.c.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }
/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc) \
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc) \
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
20820 static const struct asm_opcode insns
[] =
20822 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
20823 #define THUMB_VARIANT & arm_ext_v4t
20824 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20825 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20826 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20827 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20828 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20829 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20830 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20831 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20832 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20833 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20834 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20835 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20836 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20837 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20838 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20839 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20841 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
20842 for setting PSR flag bits. They are obsolete in V6 and do not
20843 have Thumb equivalents. */
20844 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20845 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20846 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
20847 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20848 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20849 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
20850 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20851 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20852 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
20854 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
20855 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
20856 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20857 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20859 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
20860 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20861 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
20863 OP_ADDRGLDR
),ldst
, t_ldst
),
20864 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20866 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20867 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20868 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20869 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20870 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20871 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20873 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
20874 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
20877 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
20878 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
20879 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
20880 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
20882 /* Thumb-compatibility pseudo ops. */
20883 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20884 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20885 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20886 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20887 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20888 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20889 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20890 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20891 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
20892 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
20893 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
20894 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
20896 /* These may simplify to neg. */
20897 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20898 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20900 #undef THUMB_VARIANT
20901 #define THUMB_VARIANT & arm_ext_os
20903 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20904 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20906 #undef THUMB_VARIANT
20907 #define THUMB_VARIANT & arm_ext_v6
20909 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
20911 /* V1 instructions with no Thumb analogue prior to V6T2. */
20912 #undef THUMB_VARIANT
20913 #define THUMB_VARIANT & arm_ext_v6t2
20915 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20916 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20917 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
20919 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20920 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20921 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
20922 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20924 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20925 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20927 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20928 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20930 /* V1 instructions with no Thumb analogue at all. */
20931 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
20932 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
20934 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20935 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20936 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20937 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20938 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20939 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20940 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20941 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20944 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
20945 #undef THUMB_VARIANT
20946 #define THUMB_VARIANT & arm_ext_v4t
20948 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20949 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20951 #undef THUMB_VARIANT
20952 #define THUMB_VARIANT & arm_ext_v6t2
20954 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
20955 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
20957 /* Generic coprocessor instructions. */
20958 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
20959 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20960 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20961 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20962 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20963 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20964 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20967 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
20969 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20970 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20973 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
20974 #undef THUMB_VARIANT
20975 #define THUMB_VARIANT & arm_ext_msr
20977 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
20978 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
20981 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
20982 #undef THUMB_VARIANT
20983 #define THUMB_VARIANT & arm_ext_v6t2
20985 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20986 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20987 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20988 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20989 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20990 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20991 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20992 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20995 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
20996 #undef THUMB_VARIANT
20997 #define THUMB_VARIANT & arm_ext_v4t
20999 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21000 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21001 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21002 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21003 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21004 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21007 #define ARM_VARIANT & arm_ext_v4t_5
21009 /* ARM Architecture 4T. */
21010 /* Note: bx (and blx) are required on V5, even if the processor does
21011 not support Thumb. */
21012 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
21015 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
21016 #undef THUMB_VARIANT
21017 #define THUMB_VARIANT & arm_ext_v5t
21019 /* Note: blx has 2 variants; the .value coded here is for
21020 BLX(2). Only this variant has conditional execution. */
21021 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
21022 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
21024 #undef THUMB_VARIANT
21025 #define THUMB_VARIANT & arm_ext_v6t2
21027 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
21028 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21029 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21030 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21031 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21032 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
21033 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21034 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21037 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
21038 #undef THUMB_VARIANT
21039 #define THUMB_VARIANT & arm_ext_v5exp
21041 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21042 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21043 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21044 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21046 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21047 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21049 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21050 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21051 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21052 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21054 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21055 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21056 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21057 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21059 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21060 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21062 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21063 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21064 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21065 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21068 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
21069 #undef THUMB_VARIANT
21070 #define THUMB_VARIANT & arm_ext_v6t2
21072 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
21073 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
21075 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
21076 ADDRGLDRS
), ldrd
, t_ldstd
),
21078 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21079 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21082 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
21084 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
21087 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
21088 #undef THUMB_VARIANT
21089 #define THUMB_VARIANT & arm_ext_v6
21091 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
21092 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
21093 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21094 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21095 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21096 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21097 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21098 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21099 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21100 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
21102 #undef THUMB_VARIANT
21103 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21105 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
21106 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21108 #undef THUMB_VARIANT
21109 #define THUMB_VARIANT & arm_ext_v6t2
21111 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21112 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21114 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
21115 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
21117 /* ARM V6 not included in V7M. */
21118 #undef THUMB_VARIANT
21119 #define THUMB_VARIANT & arm_ext_v6_notm
21120 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21121 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21122 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
21123 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
21124 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
21125 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21126 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
21127 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
21128 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
21129 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21130 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21131 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21132 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
21133 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
21134 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
21135 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
21136 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
21137 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
21138 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
21140 /* ARM V6 not included in V7M (eg. integer SIMD). */
21141 #undef THUMB_VARIANT
21142 #define THUMB_VARIANT & arm_ext_v6_dsp
21143 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
21144 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
21145 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21146 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21147 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21148 /* Old name for QASX. */
21149 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21150 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21151 /* Old name for QSAX. */
21152 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21153 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21154 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21155 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21156 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21157 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21158 /* Old name for SASX. */
21159 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21160 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21161 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21162 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21163 /* Old name for SHASX. */
21164 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21165 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21166 /* Old name for SHSAX. */
21167 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21168 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21169 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21170 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21171 /* Old name for SSAX. */
21172 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21173 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21174 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21175 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21176 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21177 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21178 /* Old name for UASX. */
21179 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21180 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21181 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21182 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21183 /* Old name for UHASX. */
21184 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21185 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21186 /* Old name for UHSAX. */
21187 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21188 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21189 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21190 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21191 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21192 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21193 /* Old name for UQASX. */
21194 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21195 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21196 /* Old name for UQSAX. */
21197 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21198 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21199 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21200 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21201 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21202 /* Old name for USAX. */
21203 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21204 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21205 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21206 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21207 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21208 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21209 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21210 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21211 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21212 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21213 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21214 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21215 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21216 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21217 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21218 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21219 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21220 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21221 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21222 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21223 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21224 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21225 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21226 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21227 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21228 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21229 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21230 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21231 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21232 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
21233 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
21234 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21235 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21236 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
21239 #define ARM_VARIANT & arm_ext_v6k_v6t2
21240 #undef THUMB_VARIANT
21241 #define THUMB_VARIANT & arm_ext_v6k_v6t2
21243 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
21244 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
21245 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
21246 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
21248 #undef THUMB_VARIANT
21249 #define THUMB_VARIANT & arm_ext_v6_notm
21250 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
21252 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
21253 RRnpcb
), strexd
, t_strexd
),
21255 #undef THUMB_VARIANT
21256 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21257 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
21259 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
21261 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21263 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21265 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
21268 #define ARM_VARIANT & arm_ext_sec
21269 #undef THUMB_VARIANT
21270 #define THUMB_VARIANT & arm_ext_sec
21272 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
21275 #define ARM_VARIANT & arm_ext_virt
21276 #undef THUMB_VARIANT
21277 #define THUMB_VARIANT & arm_ext_virt
21279 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
21280 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
21283 #define ARM_VARIANT & arm_ext_pan
21284 #undef THUMB_VARIANT
21285 #define THUMB_VARIANT & arm_ext_pan
21287 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
21290 #define ARM_VARIANT & arm_ext_v6t2
21291 #undef THUMB_VARIANT
21292 #define THUMB_VARIANT & arm_ext_v6t2
21294 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
21295 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
21296 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
21297 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
21299 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
21300 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
21302 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21303 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21304 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21305 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21308 #define ARM_VARIANT & arm_ext_v3
21309 #undef THUMB_VARIANT
21310 #define THUMB_VARIANT & arm_ext_v6t2
21312 TUE("csdb", 320f014
, f3af8014
, 0, (), noargs
, t_csdb
),
21313 TUF("ssbb", 57ff040
, f3bf8f40
, 0, (), noargs
, t_csdb
),
21314 TUF("pssbb", 57ff044
, f3bf8f44
, 0, (), noargs
, t_csdb
),
21317 #define ARM_VARIANT & arm_ext_v6t2
21318 #undef THUMB_VARIANT
21319 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21320 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
21321 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
21323 /* Thumb-only instructions. */
21325 #define ARM_VARIANT NULL
21326 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
21327 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
21329 /* ARM does not really have an IT instruction, so always allow it.
21330 The opcode is copied from Thumb in order to allow warnings in
21331 -mimplicit-it=[never | arm] modes. */
21333 #define ARM_VARIANT & arm_ext_v1
21334 #undef THUMB_VARIANT
21335 #define THUMB_VARIANT & arm_ext_v6t2
21337 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
21338 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
21339 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
21340 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
21341 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
21342 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
21343 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
21344 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
21345 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
21346 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
21347 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
21348 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
21349 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
21350 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
21351 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
21352 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
21353 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
21354 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
21356 /* Thumb2 only instructions. */
21358 #define ARM_VARIANT NULL
21360 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
21361 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
21362 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
21363 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
21364 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
21365 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
21367 /* Hardware division instructions. */
21369 #define ARM_VARIANT & arm_ext_adiv
21370 #undef THUMB_VARIANT
21371 #define THUMB_VARIANT & arm_ext_div
21373 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
21374 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
21376 /* ARM V6M/V7 instructions. */
21378 #define ARM_VARIANT & arm_ext_barrier
21379 #undef THUMB_VARIANT
21380 #define THUMB_VARIANT & arm_ext_barrier
21382 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
21383 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
21384 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
21386 /* ARM V7 instructions. */
21388 #define ARM_VARIANT & arm_ext_v7
21389 #undef THUMB_VARIANT
21390 #define THUMB_VARIANT & arm_ext_v7
21392 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
21393 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
21396 #define ARM_VARIANT & arm_ext_mp
21397 #undef THUMB_VARIANT
21398 #define THUMB_VARIANT & arm_ext_mp
21400 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
21402 /* AArchv8 instructions. */
21404 #define ARM_VARIANT & arm_ext_v8
21406 /* Instructions shared between armv8-a and armv8-m. */
21407 #undef THUMB_VARIANT
21408 #define THUMB_VARIANT & arm_ext_atomics
21410 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21411 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21412 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21413 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21414 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21415 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21416 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21417 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
21418 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21419 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21421 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21423 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21425 #undef THUMB_VARIANT
21426 #define THUMB_VARIANT & arm_ext_v8
21428 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
21429 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
21431 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
21434 /* Defined in V8 but is in undefined encoding space for earlier
21435 architectures. However earlier architectures are required to treat
21436 this instuction as a semihosting trap as well. Hence while not explicitly
21437 defined as such, it is in fact correct to define the instruction for all
21439 #undef THUMB_VARIANT
21440 #define THUMB_VARIANT & arm_ext_v1
21442 #define ARM_VARIANT & arm_ext_v1
21443 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
21445 /* ARMv8 T32 only. */
21447 #define ARM_VARIANT NULL
21448 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
21449 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
21450 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
21452 /* FP for ARMv8. */
21454 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
21455 #undef THUMB_VARIANT
21456 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
21458 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21459 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21460 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21461 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21462 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
21463 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
21464 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
21465 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
21466 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
21467 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
21468 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
21469 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
21470 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
21471 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
21472 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
21473 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
21474 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
21476 /* Crypto v1 extensions. */
21478 #define ARM_VARIANT & fpu_crypto_ext_armv8
21479 #undef THUMB_VARIANT
21480 #define THUMB_VARIANT & fpu_crypto_ext_armv8
21482 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
21483 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
21484 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
21485 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
21486 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
21487 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
21488 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
21489 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
21490 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
21491 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
21492 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
21493 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
21494 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
21495 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
21498 #define ARM_VARIANT & crc_ext_armv8
21499 #undef THUMB_VARIANT
21500 #define THUMB_VARIANT & crc_ext_armv8
21501 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
21502 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
21503 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
21504 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
21505 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
21506 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
21508 /* ARMv8.2 RAS extension. */
21510 #define ARM_VARIANT & arm_ext_ras
21511 #undef THUMB_VARIANT
21512 #define THUMB_VARIANT & arm_ext_ras
21513 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
21516 #define ARM_VARIANT & arm_ext_v8_3
21517 #undef THUMB_VARIANT
21518 #define THUMB_VARIANT & arm_ext_v8_3
21519 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
21520 NUF (vcmla
, 0, 4, (RNDQ
, RNDQ
, RNDQ_RNSC
, EXPi
), vcmla
),
21521 NUF (vcadd
, 0, 4, (RNDQ
, RNDQ
, RNDQ
, EXPi
), vcadd
),
21524 #define ARM_VARIANT & fpu_neon_ext_dotprod
21525 #undef THUMB_VARIANT
21526 #define THUMB_VARIANT & fpu_neon_ext_dotprod
21527 NUF (vsdot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_s
),
21528 NUF (vudot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_u
),
21531 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
21532 #undef THUMB_VARIANT
21533 #define THUMB_VARIANT NULL
21535 cCE("wfs", e200110
, 1, (RR
), rd
),
21536 cCE("rfs", e300110
, 1, (RR
), rd
),
21537 cCE("wfc", e400110
, 1, (RR
), rd
),
21538 cCE("rfc", e500110
, 1, (RR
), rd
),
21540 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21541 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21542 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21543 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21545 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21546 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21547 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21548 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21550 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
21551 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
21552 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
21553 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
21554 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
21555 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
21556 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
21557 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
21558 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
21559 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
21560 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
21561 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
21563 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
21564 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
21565 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
21566 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
21567 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
21568 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
21569 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
21570 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
21571 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
21572 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
21573 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
21574 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
21576 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
21577 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
21578 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
21579 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
21580 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
21581 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
21582 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
21583 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
21584 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
21585 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
21586 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
21587 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
21589 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
21590 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
21591 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
21592 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
21593 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
21594 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
21595 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
21596 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
21597 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
21598 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
21599 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
21600 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
21602 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
21603 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
21604 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
21605 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
21606 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
21607 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
21608 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
21609 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
21610 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
21611 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
21612 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
21613 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
21615 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
21616 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
21617 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
21618 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
21619 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
21620 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
21621 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
21622 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
21623 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
21624 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
21625 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
21626 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
21628 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
21629 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
21630 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
21631 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
21632 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
21633 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
21634 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
21635 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
21636 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
21637 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
21638 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
21639 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
21641 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
21642 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
21643 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
21644 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
21645 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
21646 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
21647 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
21648 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
21649 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
21650 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
21651 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
21652 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
21654 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
21655 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
21656 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
21657 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
21658 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
21659 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
21660 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
21661 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
21662 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
21663 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
21664 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
21665 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
21667 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
21668 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
21669 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
21670 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
21671 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
21672 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
21673 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
21674 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
21675 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
21676 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
21677 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
21678 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
21680 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
21681 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
21682 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
21683 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
21684 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
21685 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
21686 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
21687 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
21688 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
21689 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
21690 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
21691 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
21693 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
21694 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
21695 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
21696 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
21697 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
21698 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
21699 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
21700 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
21701 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
21702 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
21703 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
21704 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
21706 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
21707 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
21708 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
21709 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
21710 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
21711 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
21712 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
21713 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
21714 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
21715 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
21716 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
21717 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
21719 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
21720 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
21721 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
21722 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
21723 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
21724 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
21725 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
21726 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
21727 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
21728 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
21729 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
21730 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
21732 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
21733 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
21734 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
21735 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
21736 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
21737 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
21738 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
21739 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
21740 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
21741 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
21742 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
21743 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
21745 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
21746 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
21747 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
21748 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
21749 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
21750 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
21751 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
21752 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
21753 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
21754 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
21755 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
21756 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
21758 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21759 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21760 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21761 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21762 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21763 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21764 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21765 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21766 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21767 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21768 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21769 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21771 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21772 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21773 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21774 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21775 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21776 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21777 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21778 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21779 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21780 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21781 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21782 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21784 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21785 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21786 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21787 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21788 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21789 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21790 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21791 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21792 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21793 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21794 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21795 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21797 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21798 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21799 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21800 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21801 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21802 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21803 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21804 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21805 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21806 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21807 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21808 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21810 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21811 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21812 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21813 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21814 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21815 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21816 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21817 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21818 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21819 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21820 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21821 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21823 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21824 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21825 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21826 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21827 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21828 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21829 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21830 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21831 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21832 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21833 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21834 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21836 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21837 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21838 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21839 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21840 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21841 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21842 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21843 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21844 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21845 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21846 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21847 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21849 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21850 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21851 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21852 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21853 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21854 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21855 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21856 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21857 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21858 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21859 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21860 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21862 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21863 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21864 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21865 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21866 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21867 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21868 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21869 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21870 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21871 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21872 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21873 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21875 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21876 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21877 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21878 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21879 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21880 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21881 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21882 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21883 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21884 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21885 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21886 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21888 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21889 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21890 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21891 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21892 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21893 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21894 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21895 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21896 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21897 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21898 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21899 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21901 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21902 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21903 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21904 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21905 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21906 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21907 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21908 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21909 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21910 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21911 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21912 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21914 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21915 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21916 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21917 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21918 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21919 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21920 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21921 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21922 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21923 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21924 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21925 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21927 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21928 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21929 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21930 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21932 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
21933 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
21934 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
21935 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
21936 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
21937 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
21938 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
21939 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
21940 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
21941 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
21942 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
21943 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
21945 /* The implementation of the FIX instruction is broken on some
21946 assemblers, in that it accepts a precision specifier as well as a
21947 rounding specifier, despite the fact that this is meaningless.
21948 To be more compatible, we accept it as well, though of course it
21949 does not set any bits. */
21950 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
21951 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
21952 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
21953 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
21954 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
21955 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
21956 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
21957 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
21958 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
21959 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
21960 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
21961 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
21962 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
21964 /* Instructions that were new with the real FPA, call them V2. */
21966 #define ARM_VARIANT & fpu_fpa_ext_v2
21968 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21969 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21970 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21971 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21972 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21973 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21976 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
21978 /* Moves and type conversions. */
21979 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21980 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
21981 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
21982 cCE("fmstat", ef1fa10
, 0, (), noargs
),
21983 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
21984 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
21985 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21986 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21987 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21988 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21989 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21990 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21991 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
21992 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
21994 /* Memory operations. */
21995 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21996 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21997 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21998 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21999 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22000 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22001 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22002 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22003 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22004 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22005 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22006 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22007 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22008 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22009 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22010 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22011 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22012 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22014 /* Monadic operations. */
22015 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22016 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22017 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22019 /* Dyadic operations. */
22020 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22021 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22022 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22023 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22024 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22025 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22026 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22027 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22028 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22031 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22032 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
22033 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22034 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
22036 /* Double precision load/store are still present on single precision
22037 implementations. */
22038 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
22039 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
22040 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22041 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22042 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22043 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22044 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22045 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22046 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22047 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22050 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
22052 /* Moves and type conversions. */
22053 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22054 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22055 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22056 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
22057 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
22058 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
22059 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
22060 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22061 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22062 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22063 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22064 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22065 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22067 /* Monadic operations. */
22068 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22069 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22070 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22072 /* Dyadic operations. */
22073 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22074 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22075 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22076 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22077 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22078 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22079 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22080 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22081 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22084 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22085 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
22086 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22087 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
22090 #define ARM_VARIANT & fpu_vfp_ext_v2
22092 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
22093 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
22094 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
22095 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
22097 /* Instructions which may belong to either the Neon or VFP instruction sets.
22098 Individual encoder functions perform additional architecture checks. */
22100 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22101 #undef THUMB_VARIANT
22102 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
22104 /* These mnemonics are unique to VFP. */
22105 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
22106 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
22107 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22108 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22109 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22110 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
22111 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
22112 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
22113 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
22114 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
22116 /* Mnemonics shared by Neon and VFP. */
22117 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
22118 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
22119 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
22121 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22122 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22123 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22124 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22125 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22126 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22128 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
22129 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
22130 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
22131 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
22134 /* NOTE: All VMOV encoding is special-cased! */
22135 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
22136 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
22138 #undef THUMB_VARIANT
22139 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
22140 by different feature bits. Since we are setting the Thumb guard, we can
22141 require Thumb-1 which makes it a nop guard and set the right feature bit in
22142 do_vldr_vstr (). */
22143 #define THUMB_VARIANT & arm_ext_v4t
22144 NCE(vldr
, d100b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
22145 NCE(vstr
, d000b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
22148 #define ARM_VARIANT & arm_ext_fp16
22149 #undef THUMB_VARIANT
22150 #define THUMB_VARIANT & arm_ext_fp16
22151 /* New instructions added from v8.2, allowing the extraction and insertion of
22152 the upper 16 bits of a 32-bit vector register. */
22153 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
22154 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
22156 /* New backported fma/fms instructions optional in v8.2. */
22157 NCE (vfmal
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmal
),
22158 NCE (vfmsl
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmsl
),
22160 #undef THUMB_VARIANT
22161 #define THUMB_VARIANT & fpu_neon_ext_v1
22163 #define ARM_VARIANT & fpu_neon_ext_v1
22165 /* Data processing with three registers of the same length. */
22166 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
22167 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
22168 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
22169 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22170 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22171 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22172 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22173 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22174 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22175 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
22176 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
22177 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
22178 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
22179 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
22180 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
22181 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
22182 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
22183 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
22184 /* If not immediate, fall back to neon_dyadic_i64_su.
22185 shl_imm should accept I8 I16 I32 I64,
22186 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
22187 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
22188 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
22189 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
22190 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
22191 /* Logic ops, types optional & ignored. */
22192 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22193 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22194 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22195 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22196 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22197 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22198 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22199 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22200 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
22201 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
22202 /* Bitfield ops, untyped. */
22203 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22204 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22205 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22206 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22207 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22208 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22209 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
22210 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22211 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
22212 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22213 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
22214 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22215 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
22216 back to neon_dyadic_if_su. */
22217 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
22218 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
22219 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
22220 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
22221 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
22222 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
22223 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
22224 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
22225 /* Comparison. Type I8 I16 I32 F32. */
22226 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
22227 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
22228 /* As above, D registers only. */
22229 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
22230 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
22231 /* Int and float variants, signedness unimportant. */
22232 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
22233 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
22234 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
22235 /* Add/sub take types I8 I16 I32 I64 F32. */
22236 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
22237 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
22238 /* vtst takes sizes 8, 16, 32. */
22239 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
22240 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
22241 /* VMUL takes I8 I16 I32 F32 P8. */
22242 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
22243 /* VQD{R}MULH takes S16 S32. */
22244 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
22245 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
22246 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
22247 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
22248 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
22249 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
22250 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
22251 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
22252 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
22253 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
22254 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
22255 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
22256 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
22257 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
22258 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
22259 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
22260 /* ARM v8.1 extension. */
22261 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
22262 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
22263 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
22264 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
22266 /* Two address, int/float. Types S8 S16 S32 F32. */
22267 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
22268 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
22270 /* Data processing with two registers and a shift amount. */
22271 /* Right shifts, and variants with rounding.
22272 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
22273 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
22274 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
22275 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
22276 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
22277 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
22278 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
22279 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
22280 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
22281 /* Shift and insert. Sizes accepted 8 16 32 64. */
22282 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
22283 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
22284 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
22285 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
22286 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
22287 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
22288 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
22289 /* Right shift immediate, saturating & narrowing, with rounding variants.
22290 Types accepted S16 S32 S64 U16 U32 U64. */
22291 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
22292 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
22293 /* As above, unsigned. Types accepted S16 S32 S64. */
22294 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
22295 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
22296 /* Right shift narrowing. Types accepted I16 I32 I64. */
22297 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
22298 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
22299 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
22300 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
22301 /* CVT with optional immediate for fixed-point variant. */
22302 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
22304 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
22305 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
22307 /* Data processing, three registers of different lengths. */
22308 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
22309 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
22310 /* If not scalar, fall back to neon_dyadic_long.
22311 Vector types as above, scalar types S16 S32 U16 U32. */
22312 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
22313 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
22314 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
22315 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
22316 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
22317 /* Dyadic, narrowing insns. Types I16 I32 I64. */
22318 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22319 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22320 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22321 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22322 /* Saturating doubling multiplies. Types S16 S32. */
22323 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22324 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22325 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22326 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
22327 S16 S32 U16 U32. */
22328 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
22330 /* Extract. Size 8. */
22331 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
22332 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
22334 /* Two registers, miscellaneous. */
22335 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
22336 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
22337 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
22338 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
22339 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
22340 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
22341 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
22342 /* Vector replicate. Sizes 8 16 32. */
22343 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
22344 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
22345 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
22346 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
22347 /* VMOVN. Types I16 I32 I64. */
22348 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
22349 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
22350 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
22351 /* VQMOVUN. Types S16 S32 S64. */
22352 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
22353 /* VZIP / VUZP. Sizes 8 16 32. */
22354 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
22355 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
22356 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
22357 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
22358 /* VQABS / VQNEG. Types S8 S16 S32. */
22359 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
22360 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
22361 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
22362 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
22363 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
22364 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
22365 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
22366 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
22367 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
22368 /* Reciprocal estimates. Types U32 F16 F32. */
22369 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
22370 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
22371 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
22372 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
22373 /* VCLS. Types S8 S16 S32. */
22374 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
22375 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
22376 /* VCLZ. Types I8 I16 I32. */
22377 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
22378 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
22379 /* VCNT. Size 8. */
22380 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
22381 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
22382 /* Two address, untyped. */
22383 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
22384 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
22385 /* VTRN. Sizes 8 16 32. */
22386 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
22387 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
22389 /* Table lookup. Size 8. */
22390 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
22391 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
22393 #undef THUMB_VARIANT
22394 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
22396 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
22398 /* Neon element/structure load/store. */
22399 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22400 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22401 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22402 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22403 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22404 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22405 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22406 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22408 #undef THUMB_VARIANT
22409 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
22411 #define ARM_VARIANT & fpu_vfp_ext_v3xd
22412 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
22413 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22414 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22415 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22416 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22417 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22418 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22419 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22420 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22422 #undef THUMB_VARIANT
22423 #define THUMB_VARIANT & fpu_vfp_ext_v3
22425 #define ARM_VARIANT & fpu_vfp_ext_v3
22427 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
22428 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22429 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22430 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22431 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22432 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22433 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22434 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22435 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22438 #define ARM_VARIANT & fpu_vfp_ext_fma
22439 #undef THUMB_VARIANT
22440 #define THUMB_VARIANT & fpu_vfp_ext_fma
22441 /* Mnemonics shared by Neon and VFP. These are included in the
22442 VFP FMA variant; NEON and VFP FMA always includes the NEON
22443 FMA instructions. */
22444 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
22445 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
22446 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
22447 the v form should always be used. */
22448 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22449 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22450 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22451 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22452 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22453 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22455 #undef THUMB_VARIANT
22457 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
22459 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22460 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22461 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22462 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22463 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22464 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22465 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
22466 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
22469 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
22471 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
22472 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
22473 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
22474 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
22475 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
22476 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
22477 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
22478 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
22479 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
22480 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22481 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22482 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22483 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22484 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22485 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22486 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22487 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22488 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22489 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
22490 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
22491 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22492 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22493 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22494 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22495 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22496 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22497 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
22498 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
22499 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
22500 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
22501 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
22502 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
22503 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
22504 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
22505 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22506 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22507 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22508 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22509 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22510 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22511 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22512 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22513 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22514 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22515 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22516 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22517 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
22518 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22519 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22520 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22521 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22522 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22523 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22524 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22525 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22526 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22527 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22528 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22529 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22530 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22531 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22532 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22533 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22534 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22535 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22536 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22537 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22538 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22539 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
22540 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
22541 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22542 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22543 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22544 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22545 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22546 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22547 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22548 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22549 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22550 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22551 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22552 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22553 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22554 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22555 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22556 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22557 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22558 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22559 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
22560 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22561 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22562 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22563 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22564 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22565 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22566 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22567 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22568 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22569 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22570 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22571 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22572 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22573 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22574 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22575 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22576 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22577 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22578 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22579 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22580 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22581 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
22582 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22583 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22584 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22585 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22586 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22587 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22588 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22589 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22590 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22591 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22592 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22593 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22594 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22595 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22596 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22597 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22598 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22599 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22600 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22601 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22602 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
22603 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
22604 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22605 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22606 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22607 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22608 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22609 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22610 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22611 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22612 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22613 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22614 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22615 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22616 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22617 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22618 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22619 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22620 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22621 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22622 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22623 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22624 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22625 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22626 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22627 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22628 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22629 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22630 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22631 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22632 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
22635 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
22637 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
22638 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
22639 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
22640 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22641 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22642 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22643 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22644 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22645 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22646 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22647 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22648 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22649 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22650 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22651 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22652 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22653 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22654 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22655 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22656 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22657 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
22658 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22659 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22660 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22661 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22662 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22663 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22664 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22665 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22666 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22667 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22668 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22669 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22670 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22671 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22672 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22673 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22674 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22675 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22676 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22677 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22678 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22679 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22680 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22681 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22682 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22683 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22684 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22685 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22686 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22687 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22688 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22689 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22690 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22691 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22692 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22693 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22696 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
22698 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
22699 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
22700 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
22701 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
22702 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
22703 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
22704 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
22705 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
22706 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
22707 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
22708 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
22709 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
22710 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
22711 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
22712 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
22713 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
22714 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
22715 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
22716 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
22717 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
22718 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
22719 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
22720 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
22721 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
22722 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
22723 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
22724 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
22725 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
22726 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
22727 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
22728 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
22729 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
22730 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
22731 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
22732 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
22733 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
22734 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
22735 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
22736 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
22737 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
22738 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
22739 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
22740 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
22741 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
22742 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
22743 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
22744 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
22745 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
22746 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
22747 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
22748 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
22749 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
22750 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
22751 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
22752 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22753 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22754 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22755 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22756 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22757 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22758 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
22759 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
22760 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
22761 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
22762 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22763 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22764 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22765 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22766 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22767 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22768 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22769 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22770 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22771 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22772 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22773 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22775 /* ARMv8.5-A instructions. */
22777 #define ARM_VARIANT & arm_ext_sb
22778 #undef THUMB_VARIANT
22779 #define THUMB_VARIANT & arm_ext_sb
22780 TUF("sb", 57ff070
, f3bf8f70
, 0, (), noargs
, noargs
),
22783 #define ARM_VARIANT & arm_ext_predres
22784 #undef THUMB_VARIANT
22785 #define THUMB_VARIANT & arm_ext_predres
22786 CE("cfprctx", e070f93
, 1, (RRnpc
), rd
),
22787 CE("dvprctx", e070fb3
, 1, (RRnpc
), rd
),
22788 CE("cpprctx", e070ff3
, 1, (RRnpc
), rd
),
22790 /* ARMv8-M instructions. */
22792 #define ARM_VARIANT NULL
22793 #undef THUMB_VARIANT
22794 #define THUMB_VARIANT & arm_ext_v8m
22795 ToU("sg", e97fe97f
, 0, (), noargs
),
22796 ToC("blxns", 4784, 1, (RRnpc
), t_blx
),
22797 ToC("bxns", 4704, 1, (RRnpc
), t_bx
),
22798 ToC("tt", e840f000
, 2, (RRnpc
, RRnpc
), tt
),
22799 ToC("ttt", e840f040
, 2, (RRnpc
, RRnpc
), tt
),
22800 ToC("tta", e840f080
, 2, (RRnpc
, RRnpc
), tt
),
22801 ToC("ttat", e840f0c0
, 2, (RRnpc
, RRnpc
), tt
),
22803 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
22804 instructions behave as nop if no VFP is present. */
22805 #undef THUMB_VARIANT
22806 #define THUMB_VARIANT & arm_ext_v8m_main
22807 ToC("vlldm", ec300a00
, 1, (RRnpc
), rn
),
22808 ToC("vlstm", ec200a00
, 1, (RRnpc
), rn
),
22810 /* Armv8.1-M Mainline instructions. */
22811 #undef THUMB_VARIANT
22812 #define THUMB_VARIANT & arm_ext_v8_1m_main
22813 toC("bf", _bf
, 2, (EXPs
, EXPs
), t_branch_future
),
22814 toU("bfcsel", _bfcsel
, 4, (EXPs
, EXPs
, EXPs
, COND
), t_branch_future
),
22815 toC("bfx", _bfx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22816 toC("bfl", _bfl
, 2, (EXPs
, EXPs
), t_branch_future
),
22817 toC("bflx", _bflx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22819 toU("dls", _dls
, 2, (LR
, RRnpcsp
), t_loloop
),
22820 toU("wls", _wls
, 3, (LR
, RRnpcsp
, EXP
), t_loloop
),
22821 toU("le", _le
, 2, (oLR
, EXP
), t_loloop
),
22823 ToC("clrm", e89f0000
, 1, (CLRMLST
), t_clrm
),
22824 ToC("vscclrm", ec9f0a00
, 1, (VRSDVLST
), t_vscclrm
),
22826 #undef THUMB_VARIANT
22827 #define THUMB_VARIANT & mve_ext
22828 ToC("vpst", fe710f4d
, 0, (), mve_vpt
),
22829 ToC("vpstt", fe318f4d
, 0, (), mve_vpt
),
22830 ToC("vpste", fe718f4d
, 0, (), mve_vpt
),
22831 ToC("vpsttt", fe314f4d
, 0, (), mve_vpt
),
22832 ToC("vpstte", fe31cf4d
, 0, (), mve_vpt
),
22833 ToC("vpstet", fe71cf4d
, 0, (), mve_vpt
),
22834 ToC("vpstee", fe714f4d
, 0, (), mve_vpt
),
22835 ToC("vpstttt", fe312f4d
, 0, (), mve_vpt
),
22836 ToC("vpsttte", fe316f4d
, 0, (), mve_vpt
),
22837 ToC("vpsttet", fe31ef4d
, 0, (), mve_vpt
),
22838 ToC("vpsttee", fe31af4d
, 0, (), mve_vpt
),
22839 ToC("vpstett", fe71af4d
, 0, (), mve_vpt
),
22840 ToC("vpstete", fe71ef4d
, 0, (), mve_vpt
),
22841 ToC("vpsteet", fe716f4d
, 0, (), mve_vpt
),
22842 ToC("vpsteee", fe712f4d
, 0, (), mve_vpt
),
22844 /* MVE and MVE FP only. */
22845 mCEF(vmullb
, _vmullb
, 3, (RMQ
, RMQ
, RMQ
), mve_vmull
),
22846 mCEF(vabav
, _vabav
, 3, (RRnpcsp
, RMQ
, RMQ
), mve_vabav
),
22847 mCEF(vmladav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22848 mCEF(vmladava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22849 mCEF(vmladavx
, _vmladavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22850 mCEF(vmladavax
, _vmladavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22851 mCEF(vmlav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22852 mCEF(vmlava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22853 mCEF(vmlsdav
, _vmlsdav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22854 mCEF(vmlsdava
, _vmlsdava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22855 mCEF(vmlsdavx
, _vmlsdavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22856 mCEF(vmlsdavax
, _vmlsdavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22859 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22860 #undef THUMB_VARIANT
22861 #define THUMB_VARIANT & arm_ext_v6t2
22863 mCEF(vmullt
, _vmullt
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQ_RNSC_MQ
), mve_vmull
),
22864 mnCEF(vadd
, _vadd
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
22865 mnCEF(vsub
, _vsub
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
22867 MNCEF(vabs
, 1b10300
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
22868 MNCEF(vneg
, 1b10380
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
22871 #define ARM_VARIANT & fpu_neon_ext_v1
22872 mnUF(vabd
, _vabd
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ
), neon_dyadic_if_su
),
22873 mnUF(vabdl
, _vabdl
, 3, (RNQMQ
, RNDMQ
, RNDMQ
), neon_dyadic_long
),
22874 mnUF(vaddl
, _vaddl
, 3, (RNQMQ
, RNDMQ
, RNDMQR
), neon_dyadic_long
),
22875 mnUF(vsubl
, _vsubl
, 3, (RNQMQ
, RNDMQ
, RNDMQR
), neon_dyadic_long
),
22878 #undef THUMB_VARIANT
22910 /* MD interface: bits in the object file. */
22912 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
22913 for use in the a.out file, and stores them in the array pointed to by buf.
22914 This knows about the endian-ness of the target machine and does
22915 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
22916 2 (short) and 4 (long) Floating numbers are put out as a series of
22917 LITTLENUMS (shorts, here at least). */
22920 md_number_to_chars (char * buf
, valueT val
, int n
)
22922 if (target_big_endian
)
22923 number_to_chars_bigendian (buf
, val
, n
);
22925 number_to_chars_littleendian (buf
, val
, n
);
22929 md_chars_to_number (char * buf
, int n
)
22932 unsigned char * where
= (unsigned char *) buf
;
22934 if (target_big_endian
)
22939 result
|= (*where
++ & 255);
22947 result
|= (where
[n
] & 255);
22954 /* MD interface: Sections. */
22956 /* Calculate the maximum variable size (i.e., excluding fr_fix)
22957 that an rs_machine_dependent frag may reach. */
22960 arm_frag_max_var (fragS
*fragp
)
22962 /* We only use rs_machine_dependent for variable-size Thumb instructions,
22963 which are either THUMB_SIZE (2) or INSN_SIZE (4).
22965 Note that we generate relaxable instructions even for cases that don't
22966 really need it, like an immediate that's a trivial constant. So we're
22967 overestimating the instruction size for some of those cases. Rather
22968 than putting more intelligence here, it would probably be better to
22969 avoid generating a relaxation frag in the first place when it can be
22970 determined up front that a short instruction will suffice. */
22972 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
22976 /* Estimate the size of a frag before relaxing. Assume everything fits in
22980 md_estimate_size_before_relax (fragS
* fragp
,
22981 segT segtype ATTRIBUTE_UNUSED
)
22987 /* Convert a machine dependent frag. */
22990 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
22992 unsigned long insn
;
22993 unsigned long old_op
;
23001 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
23003 old_op
= bfd_get_16(abfd
, buf
);
23004 if (fragp
->fr_symbol
)
23006 exp
.X_op
= O_symbol
;
23007 exp
.X_add_symbol
= fragp
->fr_symbol
;
23011 exp
.X_op
= O_constant
;
23013 exp
.X_add_number
= fragp
->fr_offset
;
23014 opcode
= fragp
->fr_subtype
;
23017 case T_MNEM_ldr_pc
:
23018 case T_MNEM_ldr_pc2
:
23019 case T_MNEM_ldr_sp
:
23020 case T_MNEM_str_sp
:
23027 if (fragp
->fr_var
== 4)
23029 insn
= THUMB_OP32 (opcode
);
23030 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
23032 insn
|= (old_op
& 0x700) << 4;
23036 insn
|= (old_op
& 7) << 12;
23037 insn
|= (old_op
& 0x38) << 13;
23039 insn
|= 0x00000c00;
23040 put_thumb32_insn (buf
, insn
);
23041 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
23045 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
23047 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
23050 if (fragp
->fr_var
== 4)
23052 insn
= THUMB_OP32 (opcode
);
23053 insn
|= (old_op
& 0xf0) << 4;
23054 put_thumb32_insn (buf
, insn
);
23055 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
23059 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23060 exp
.X_add_number
-= 4;
23068 if (fragp
->fr_var
== 4)
23070 int r0off
= (opcode
== T_MNEM_mov
23071 || opcode
== T_MNEM_movs
) ? 0 : 8;
23072 insn
= THUMB_OP32 (opcode
);
23073 insn
= (insn
& 0xe1ffffff) | 0x10000000;
23074 insn
|= (old_op
& 0x700) << r0off
;
23075 put_thumb32_insn (buf
, insn
);
23076 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
23080 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
23085 if (fragp
->fr_var
== 4)
23087 insn
= THUMB_OP32(opcode
);
23088 put_thumb32_insn (buf
, insn
);
23089 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
23092 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
23096 if (fragp
->fr_var
== 4)
23098 insn
= THUMB_OP32(opcode
);
23099 insn
|= (old_op
& 0xf00) << 14;
23100 put_thumb32_insn (buf
, insn
);
23101 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
23104 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
23107 case T_MNEM_add_sp
:
23108 case T_MNEM_add_pc
:
23109 case T_MNEM_inc_sp
:
23110 case T_MNEM_dec_sp
:
23111 if (fragp
->fr_var
== 4)
23113 /* ??? Choose between add and addw. */
23114 insn
= THUMB_OP32 (opcode
);
23115 insn
|= (old_op
& 0xf0) << 4;
23116 put_thumb32_insn (buf
, insn
);
23117 if (opcode
== T_MNEM_add_pc
)
23118 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
23120 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
23123 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23131 if (fragp
->fr_var
== 4)
23133 insn
= THUMB_OP32 (opcode
);
23134 insn
|= (old_op
& 0xf0) << 4;
23135 insn
|= (old_op
& 0xf) << 16;
23136 put_thumb32_insn (buf
, insn
);
23137 if (insn
& (1 << 20))
23138 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
23140 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
23143 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23149 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
23150 (enum bfd_reloc_code_real
) reloc_type
);
23151 fixp
->fx_file
= fragp
->fr_file
;
23152 fixp
->fx_line
= fragp
->fr_line
;
23153 fragp
->fr_fix
+= fragp
->fr_var
;
23155 /* Set whether we use thumb-2 ISA based on final relaxation results. */
23156 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
23157 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
23158 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
23161 /* Return the size of a relaxable immediate operand instruction.
23162 SHIFT and SIZE specify the form of the allowable immediate. */
23164 relax_immediate (fragS
*fragp
, int size
, int shift
)
23170 /* ??? Should be able to do better than this. */
23171 if (fragp
->fr_symbol
)
23174 low
= (1 << shift
) - 1;
23175 mask
= (1 << (shift
+ size
)) - (1 << shift
);
23176 offset
= fragp
->fr_offset
;
23177 /* Force misaligned offsets to 32-bit variant. */
23180 if (offset
& ~mask
)
23185 /* Get the address of a symbol during relaxation. */
23187 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
23193 sym
= fragp
->fr_symbol
;
23194 sym_frag
= symbol_get_frag (sym
);
23195 know (S_GET_SEGMENT (sym
) != absolute_section
23196 || sym_frag
== &zero_address_frag
);
23197 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
23199 /* If frag has yet to be reached on this pass, assume it will
23200 move by STRETCH just as we did. If this is not so, it will
23201 be because some frag between grows, and that will force
23205 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
23209 /* Adjust stretch for any alignment frag. Note that if have
23210 been expanding the earlier code, the symbol may be
23211 defined in what appears to be an earlier frag. FIXME:
23212 This doesn't handle the fr_subtype field, which specifies
23213 a maximum number of bytes to skip when doing an
23215 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
23217 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
23220 stretch
= - ((- stretch
)
23221 & ~ ((1 << (int) f
->fr_offset
) - 1));
23223 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
23235 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
23238 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
23243 /* Assume worst case for symbols not known to be in the same section. */
23244 if (fragp
->fr_symbol
== NULL
23245 || !S_IS_DEFINED (fragp
->fr_symbol
)
23246 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
23247 || S_IS_WEAK (fragp
->fr_symbol
))
23250 val
= relaxed_symbol_addr (fragp
, stretch
);
23251 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
23252 addr
= (addr
+ 4) & ~3;
23253 /* Force misaligned targets to 32-bit variant. */
23257 if (val
< 0 || val
> 1020)
23262 /* Return the size of a relaxable add/sub immediate instruction. */
23264 relax_addsub (fragS
*fragp
, asection
*sec
)
23269 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
23270 op
= bfd_get_16(sec
->owner
, buf
);
23271 if ((op
& 0xf) == ((op
>> 4) & 0xf))
23272 return relax_immediate (fragp
, 8, 0);
23274 return relax_immediate (fragp
, 3, 0);
23277 /* Return TRUE iff the definition of symbol S could be pre-empted
23278 (overridden) at link or load time. */
23280 symbol_preemptible (symbolS
*s
)
23282 /* Weak symbols can always be pre-empted. */
23286 /* Non-global symbols cannot be pre-empted. */
23287 if (! S_IS_EXTERNAL (s
))
23291 /* In ELF, a global symbol can be marked protected, or private. In that
23292 case it can't be pre-empted (other definitions in the same link unit
23293 would violate the ODR). */
23294 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
23298 /* Other global symbols might be pre-empted. */
23302 /* Return the size of a relaxable branch instruction. BITS is the
23303 size of the offset field in the narrow instruction. */
23306 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
23312 /* Assume worst case for symbols not known to be in the same section. */
23313 if (!S_IS_DEFINED (fragp
->fr_symbol
)
23314 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
23315 || S_IS_WEAK (fragp
->fr_symbol
))
23319 /* A branch to a function in ARM state will require interworking. */
23320 if (S_IS_DEFINED (fragp
->fr_symbol
)
23321 && ARM_IS_FUNC (fragp
->fr_symbol
))
23325 if (symbol_preemptible (fragp
->fr_symbol
))
23328 val
= relaxed_symbol_addr (fragp
, stretch
);
23329 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
23332 /* Offset is a signed value *2 */
23334 if (val
>= limit
|| val
< -limit
)
23340 /* Relax a machine dependent frag. This returns the amount by which
23341 the current size of the frag should change. */
23344 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
23349 oldsize
= fragp
->fr_var
;
23350 switch (fragp
->fr_subtype
)
23352 case T_MNEM_ldr_pc2
:
23353 newsize
= relax_adr (fragp
, sec
, stretch
);
23355 case T_MNEM_ldr_pc
:
23356 case T_MNEM_ldr_sp
:
23357 case T_MNEM_str_sp
:
23358 newsize
= relax_immediate (fragp
, 8, 2);
23362 newsize
= relax_immediate (fragp
, 5, 2);
23366 newsize
= relax_immediate (fragp
, 5, 1);
23370 newsize
= relax_immediate (fragp
, 5, 0);
23373 newsize
= relax_adr (fragp
, sec
, stretch
);
23379 newsize
= relax_immediate (fragp
, 8, 0);
23382 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
23385 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
23387 case T_MNEM_add_sp
:
23388 case T_MNEM_add_pc
:
23389 newsize
= relax_immediate (fragp
, 8, 2);
23391 case T_MNEM_inc_sp
:
23392 case T_MNEM_dec_sp
:
23393 newsize
= relax_immediate (fragp
, 7, 2);
23399 newsize
= relax_addsub (fragp
, sec
);
23405 fragp
->fr_var
= newsize
;
23406 /* Freeze wide instructions that are at or before the same location as
23407 in the previous pass. This avoids infinite loops.
23408 Don't freeze them unconditionally because targets may be artificially
23409 misaligned by the expansion of preceding frags. */
23410 if (stretch
<= 0 && newsize
> 2)
23412 md_convert_frag (sec
->owner
, sec
, fragp
);
23416 return newsize
- oldsize
;
23419 /* Round up a section size to the appropriate boundary. */
23422 md_section_align (segT segment ATTRIBUTE_UNUSED
,
23428 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
23429 of an rs_align_code fragment. */
23432 arm_handle_align (fragS
* fragP
)
23434 static unsigned char const arm_noop
[2][2][4] =
23437 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
23438 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
23441 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
23442 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
23445 static unsigned char const thumb_noop
[2][2][2] =
23448 {0xc0, 0x46}, /* LE */
23449 {0x46, 0xc0}, /* BE */
23452 {0x00, 0xbf}, /* LE */
23453 {0xbf, 0x00} /* BE */
23456 static unsigned char const wide_thumb_noop
[2][4] =
23457 { /* Wide Thumb-2 */
23458 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
23459 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
23462 unsigned bytes
, fix
, noop_size
;
23464 const unsigned char * noop
;
23465 const unsigned char *narrow_noop
= NULL
;
23470 if (fragP
->fr_type
!= rs_align_code
)
23473 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
23474 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
23477 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
23478 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
23480 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
23482 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
23484 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
23485 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
23487 narrow_noop
= thumb_noop
[1][target_big_endian
];
23488 noop
= wide_thumb_noop
[target_big_endian
];
23491 noop
= thumb_noop
[0][target_big_endian
];
23499 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
23500 ? selected_cpu
: arm_arch_none
,
23502 [target_big_endian
];
23509 fragP
->fr_var
= noop_size
;
23511 if (bytes
& (noop_size
- 1))
23513 fix
= bytes
& (noop_size
- 1);
23515 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
23517 memset (p
, 0, fix
);
23524 if (bytes
& noop_size
)
23526 /* Insert a narrow noop. */
23527 memcpy (p
, narrow_noop
, noop_size
);
23529 bytes
-= noop_size
;
23533 /* Use wide noops for the remainder */
23537 while (bytes
>= noop_size
)
23539 memcpy (p
, noop
, noop_size
);
23541 bytes
-= noop_size
;
23545 fragP
->fr_fix
+= fix
;
23548 /* Called from md_do_align. Used to create an alignment
23549 frag in a code section. */
23552 arm_frag_align_code (int n
, int max
)
23556 /* We assume that there will never be a requirement
23557 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
23558 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
23563 _("alignments greater than %d bytes not supported in .text sections."),
23564 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
23565 as_fatal ("%s", err_msg
);
23568 p
= frag_var (rs_align_code
,
23569 MAX_MEM_FOR_RS_ALIGN_CODE
,
23571 (relax_substateT
) max
,
23578 /* Perform target specific initialisation of a frag.
23579 Note - despite the name this initialisation is not done when the frag
23580 is created, but only when its type is assigned. A frag can be created
23581 and used a long time before its type is set, so beware of assuming that
23582 this initialisation is performed first. */
23586 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
23588 /* Record whether this frag is in an ARM or a THUMB area. */
23589 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
23592 #else /* OBJ_ELF is defined. */
23594 arm_init_frag (fragS
* fragP
, int max_chars
)
23596 bfd_boolean frag_thumb_mode
;
23598 /* If the current ARM vs THUMB mode has not already
23599 been recorded into this frag then do so now. */
23600 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
23601 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
23603 /* PR 21809: Do not set a mapping state for debug sections
23604 - it just confuses other tools. */
23605 if (bfd_get_section_flags (NULL
, now_seg
) & SEC_DEBUGGING
)
23608 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
23610 /* Record a mapping symbol for alignment frags. We will delete this
23611 later if the alignment ends up empty. */
23612 switch (fragP
->fr_type
)
23615 case rs_align_test
:
23617 mapping_state_2 (MAP_DATA
, max_chars
);
23619 case rs_align_code
:
23620 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
23627 /* When we change sections we need to issue a new mapping symbol. */
23630 arm_elf_change_section (void)
23632 /* Link an unlinked unwind index table section to the .text section. */
23633 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
23634 && elf_linked_to_section (now_seg
) == NULL
)
23635 elf_linked_to_section (now_seg
) = text_section
;
23639 arm_elf_section_type (const char * str
, size_t len
)
23641 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
23642 return SHT_ARM_EXIDX
;
23647 /* Code to deal with unwinding tables. */
23649 static void add_unwind_adjustsp (offsetT
);
23651 /* Generate any deferred unwind frame offset. */
23654 flush_pending_unwind (void)
23658 offset
= unwind
.pending_offset
;
23659 unwind
.pending_offset
= 0;
23661 add_unwind_adjustsp (offset
);
23664 /* Add an opcode to this list for this function. Two-byte opcodes should
23665 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
23669 add_unwind_opcode (valueT op
, int length
)
23671 /* Add any deferred stack adjustment. */
23672 if (unwind
.pending_offset
)
23673 flush_pending_unwind ();
23675 unwind
.sp_restored
= 0;
23677 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
23679 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
23680 if (unwind
.opcodes
)
23681 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
23682 unwind
.opcode_alloc
);
23684 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
23689 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
23691 unwind
.opcode_count
++;
23695 /* Add unwind opcodes to adjust the stack pointer. */
23698 add_unwind_adjustsp (offsetT offset
)
23702 if (offset
> 0x200)
23704 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
23709 /* Long form: 0xb2, uleb128. */
23710 /* This might not fit in a word so add the individual bytes,
23711 remembering the list is built in reverse order. */
23712 o
= (valueT
) ((offset
- 0x204) >> 2);
23714 add_unwind_opcode (0, 1);
23716 /* Calculate the uleb128 encoding of the offset. */
23720 bytes
[n
] = o
& 0x7f;
23726 /* Add the insn. */
23728 add_unwind_opcode (bytes
[n
- 1], 1);
23729 add_unwind_opcode (0xb2, 1);
23731 else if (offset
> 0x100)
23733 /* Two short opcodes. */
23734 add_unwind_opcode (0x3f, 1);
23735 op
= (offset
- 0x104) >> 2;
23736 add_unwind_opcode (op
, 1);
23738 else if (offset
> 0)
23740 /* Short opcode. */
23741 op
= (offset
- 4) >> 2;
23742 add_unwind_opcode (op
, 1);
23744 else if (offset
< 0)
23747 while (offset
> 0x100)
23749 add_unwind_opcode (0x7f, 1);
23752 op
= ((offset
- 4) >> 2) | 0x40;
23753 add_unwind_opcode (op
, 1);
23757 /* Finish the list of unwind opcodes for this function. */
23760 finish_unwind_opcodes (void)
23764 if (unwind
.fp_used
)
23766 /* Adjust sp as necessary. */
23767 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
23768 flush_pending_unwind ();
23770 /* After restoring sp from the frame pointer. */
23771 op
= 0x90 | unwind
.fp_reg
;
23772 add_unwind_opcode (op
, 1);
23775 flush_pending_unwind ();
23779 /* Start an exception table entry. If idx is nonzero this is an index table
23783 start_unwind_section (const segT text_seg
, int idx
)
23785 const char * text_name
;
23786 const char * prefix
;
23787 const char * prefix_once
;
23788 const char * group_name
;
23796 prefix
= ELF_STRING_ARM_unwind
;
23797 prefix_once
= ELF_STRING_ARM_unwind_once
;
23798 type
= SHT_ARM_EXIDX
;
23802 prefix
= ELF_STRING_ARM_unwind_info
;
23803 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
23804 type
= SHT_PROGBITS
;
23807 text_name
= segment_name (text_seg
);
23808 if (streq (text_name
, ".text"))
23811 if (strncmp (text_name
, ".gnu.linkonce.t.",
23812 strlen (".gnu.linkonce.t.")) == 0)
23814 prefix
= prefix_once
;
23815 text_name
+= strlen (".gnu.linkonce.t.");
23818 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
23824 /* Handle COMDAT group. */
23825 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
23827 group_name
= elf_group_name (text_seg
);
23828 if (group_name
== NULL
)
23830 as_bad (_("Group section `%s' has no group signature"),
23831 segment_name (text_seg
));
23832 ignore_rest_of_line ();
23835 flags
|= SHF_GROUP
;
23839 obj_elf_change_section (sec_name
, type
, 0, flags
, 0, group_name
,
23842 /* Set the section link for index tables. */
23844 elf_linked_to_section (now_seg
) = text_seg
;
23848 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
23849 personality routine data. Returns zero, or the index table value for
23850 an inline entry. */
23853 create_unwind_entry (int have_data
)
23858 /* The current word of data. */
23860 /* The number of bytes left in this word. */
23863 finish_unwind_opcodes ();
23865 /* Remember the current text section. */
23866 unwind
.saved_seg
= now_seg
;
23867 unwind
.saved_subseg
= now_subseg
;
23869 start_unwind_section (now_seg
, 0);
23871 if (unwind
.personality_routine
== NULL
)
23873 if (unwind
.personality_index
== -2)
23876 as_bad (_("handlerdata in cantunwind frame"));
23877 return 1; /* EXIDX_CANTUNWIND. */
23880 /* Use a default personality routine if none is specified. */
23881 if (unwind
.personality_index
== -1)
23883 if (unwind
.opcode_count
> 3)
23884 unwind
.personality_index
= 1;
23886 unwind
.personality_index
= 0;
23889 /* Space for the personality routine entry. */
23890 if (unwind
.personality_index
== 0)
23892 if (unwind
.opcode_count
> 3)
23893 as_bad (_("too many unwind opcodes for personality routine 0"));
23897 /* All the data is inline in the index table. */
23900 while (unwind
.opcode_count
> 0)
23902 unwind
.opcode_count
--;
23903 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
23907 /* Pad with "finish" opcodes. */
23909 data
= (data
<< 8) | 0xb0;
23916 /* We get two opcodes "free" in the first word. */
23917 size
= unwind
.opcode_count
- 2;
23921 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
23922 if (unwind
.personality_index
!= -1)
23924 as_bad (_("attempt to recreate an unwind entry"));
23928 /* An extra byte is required for the opcode count. */
23929 size
= unwind
.opcode_count
+ 1;
23932 size
= (size
+ 3) >> 2;
23934 as_bad (_("too many unwind opcodes"));
23936 frag_align (2, 0, 0);
23937 record_alignment (now_seg
, 2);
23938 unwind
.table_entry
= expr_build_dot ();
23940 /* Allocate the table entry. */
23941 ptr
= frag_more ((size
<< 2) + 4);
23942 /* PR 13449: Zero the table entries in case some of them are not used. */
23943 memset (ptr
, 0, (size
<< 2) + 4);
23944 where
= frag_now_fix () - ((size
<< 2) + 4);
23946 switch (unwind
.personality_index
)
23949 /* ??? Should this be a PLT generating relocation? */
23950 /* Custom personality routine. */
23951 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
23952 BFD_RELOC_ARM_PREL31
);
23957 /* Set the first byte to the number of additional words. */
23958 data
= size
> 0 ? size
- 1 : 0;
23962 /* ABI defined personality routines. */
23964 /* Three opcodes bytes are packed into the first word. */
23971 /* The size and first two opcode bytes go in the first word. */
23972 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
23977 /* Should never happen. */
23981 /* Pack the opcodes into words (MSB first), reversing the list at the same
23983 while (unwind
.opcode_count
> 0)
23987 md_number_to_chars (ptr
, data
, 4);
23992 unwind
.opcode_count
--;
23994 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
23997 /* Finish off the last word. */
24000 /* Pad with "finish" opcodes. */
24002 data
= (data
<< 8) | 0xb0;
24004 md_number_to_chars (ptr
, data
, 4);
24009 /* Add an empty descriptor if there is no user-specified data. */
24010 ptr
= frag_more (4);
24011 md_number_to_chars (ptr
, 0, 4);
24018 /* Initialize the DWARF-2 unwind information for this procedure. */
24021 tc_arm_frame_initial_instructions (void)
24023 cfi_add_CFA_def_cfa (REG_SP
, 0);
24025 #endif /* OBJ_ELF */
24027 /* Convert REGNAME to a DWARF-2 register number. */
24030 tc_arm_regname_to_dw2regnum (char *regname
)
24032 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
24036 /* PR 16694: Allow VFP registers as well. */
24037 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
24041 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
24050 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
24054 exp
.X_op
= O_secrel
;
24055 exp
.X_add_symbol
= symbol
;
24056 exp
.X_add_number
= 0;
24057 emit_expr (&exp
, size
);
24061 /* MD interface: Symbol and relocation handling. */
24063 /* Return the address within the segment that a PC-relative fixup is
24064 relative to. For ARM, PC-relative fixups applied to instructions
24065 are generally relative to the location of the fixup plus 8 bytes.
24066 Thumb branches are offset by 4, and Thumb loads relative to PC
24067 require special handling. */
24070 md_pcrel_from_section (fixS
* fixP
, segT seg
)
24072 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24074 /* If this is pc-relative and we are going to emit a relocation
24075 then we just want to put out any pipeline compensation that the linker
24076 will need. Otherwise we want to use the calculated base.
24077 For WinCE we skip the bias for externals as well, since this
24078 is how the MS ARM-CE assembler behaves and we want to be compatible. */
24080 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
24081 || (arm_force_relocation (fixP
)
24083 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
24089 switch (fixP
->fx_r_type
)
24091 /* PC relative addressing on the Thumb is slightly odd as the
24092 bottom two bits of the PC are forced to zero for the
24093 calculation. This happens *after* application of the
24094 pipeline offset. However, Thumb adrl already adjusts for
24095 this, so we need not do it again. */
24096 case BFD_RELOC_ARM_THUMB_ADD
:
24099 case BFD_RELOC_ARM_THUMB_OFFSET
:
24100 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
24101 case BFD_RELOC_ARM_T32_ADD_PC12
:
24102 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
24103 return (base
+ 4) & ~3;
24105 /* Thumb branches are simply offset by +4. */
24106 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
24107 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
24108 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
24109 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
24110 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24111 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24112 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
24113 case BFD_RELOC_ARM_THUMB_BF17
:
24114 case BFD_RELOC_ARM_THUMB_BF19
:
24115 case BFD_RELOC_ARM_THUMB_BF13
:
24116 case BFD_RELOC_ARM_THUMB_LOOP12
:
24119 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24121 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24122 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24123 && ARM_IS_FUNC (fixP
->fx_addsy
)
24124 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24125 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24128 /* BLX is like branches above, but forces the low two bits of PC to
24130 case BFD_RELOC_THUMB_PCREL_BLX
:
24132 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24133 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24134 && THUMB_IS_FUNC (fixP
->fx_addsy
)
24135 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24136 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24137 return (base
+ 4) & ~3;
24139 /* ARM mode branches are offset by +8. However, the Windows CE
24140 loader expects the relocation not to take this into account. */
24141 case BFD_RELOC_ARM_PCREL_BLX
:
24143 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24144 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24145 && ARM_IS_FUNC (fixP
->fx_addsy
)
24146 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24147 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24150 case BFD_RELOC_ARM_PCREL_CALL
:
24152 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24153 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24154 && THUMB_IS_FUNC (fixP
->fx_addsy
)
24155 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24156 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24159 case BFD_RELOC_ARM_PCREL_BRANCH
:
24160 case BFD_RELOC_ARM_PCREL_JUMP
:
24161 case BFD_RELOC_ARM_PLT32
:
24163 /* When handling fixups immediately, because we have already
24164 discovered the value of a symbol, or the address of the frag involved
24165 we must account for the offset by +8, as the OS loader will never see the reloc.
24166 see fixup_segment() in write.c
24167 The S_IS_EXTERNAL test handles the case of global symbols.
24168 Those need the calculated base, not just the pipe compensation the linker will need. */
24170 && fixP
->fx_addsy
!= NULL
24171 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24172 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
24180 /* ARM mode loads relative to PC are also offset by +8. Unlike
24181 branches, the Windows CE loader *does* expect the relocation
24182 to take this into account. */
24183 case BFD_RELOC_ARM_OFFSET_IMM
:
24184 case BFD_RELOC_ARM_OFFSET_IMM8
:
24185 case BFD_RELOC_ARM_HWLITERAL
:
24186 case BFD_RELOC_ARM_LITERAL
:
24187 case BFD_RELOC_ARM_CP_OFF_IMM
:
24191 /* Other PC-relative relocations are un-offset. */
24197 static bfd_boolean flag_warn_syms
= TRUE
;
24200 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
24202 /* PR 18347 - Warn if the user attempts to create a symbol with the same
24203 name as an ARM instruction. Whilst strictly speaking it is allowed, it
24204 does mean that the resulting code might be very confusing to the reader.
24205 Also this warning can be triggered if the user omits an operand before
24206 an immediate address, eg:
24210 GAS treats this as an assignment of the value of the symbol foo to a
24211 symbol LDR, and so (without this code) it will not issue any kind of
24212 warning or error message.
24214 Note - ARM instructions are case-insensitive but the strings in the hash
24215 table are all stored in lower case, so we must first ensure that name is
24217 if (flag_warn_syms
&& arm_ops_hsh
)
24219 char * nbuf
= strdup (name
);
24222 for (p
= nbuf
; *p
; p
++)
24224 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
24226 static struct hash_control
* already_warned
= NULL
;
24228 if (already_warned
== NULL
)
24229 already_warned
= hash_new ();
24230 /* Only warn about the symbol once. To keep the code
24231 simple we let hash_insert do the lookup for us. */
24232 if (hash_insert (already_warned
, nbuf
, NULL
) == NULL
)
24233 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
24242 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
24243 Otherwise we have no need to default values of symbols. */
24246 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
24249 if (name
[0] == '_' && name
[1] == 'G'
24250 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
24254 if (symbol_find (name
))
24255 as_bad (_("GOT already in the symbol table"));
24257 GOT_symbol
= symbol_new (name
, undefined_section
,
24258 (valueT
) 0, & zero_address_frag
);
24268 /* Subroutine of md_apply_fix. Check to see if an immediate can be
24269 computed as two separate immediate values, added together. We
24270 already know that this value cannot be computed by just one ARM
24273 static unsigned int
24274 validate_immediate_twopart (unsigned int val
,
24275 unsigned int * highpart
)
24280 for (i
= 0; i
< 32; i
+= 2)
24281 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
24287 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
24289 else if (a
& 0xff0000)
24291 if (a
& 0xff000000)
24293 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
24297 gas_assert (a
& 0xff000000);
24298 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
24301 return (a
& 0xff) | (i
<< 7);
24308 validate_offset_imm (unsigned int val
, int hwse
)
24310 if ((hwse
&& val
> 255) || val
> 4095)
24315 /* Subroutine of md_apply_fix. Do those data_ops which can take a
24316 negative immediate constant by altering the instruction. A bit of
24321 by inverting the second operand, and
24324 by negating the second operand. */
24327 negate_data_op (unsigned long * instruction
,
24328 unsigned long value
)
24331 unsigned long negated
, inverted
;
24333 negated
= encode_arm_immediate (-value
);
24334 inverted
= encode_arm_immediate (~value
);
24336 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
24339 /* First negates. */
24340 case OPCODE_SUB
: /* ADD <-> SUB */
24341 new_inst
= OPCODE_ADD
;
24346 new_inst
= OPCODE_SUB
;
24350 case OPCODE_CMP
: /* CMP <-> CMN */
24351 new_inst
= OPCODE_CMN
;
24356 new_inst
= OPCODE_CMP
;
24360 /* Now Inverted ops. */
24361 case OPCODE_MOV
: /* MOV <-> MVN */
24362 new_inst
= OPCODE_MVN
;
24367 new_inst
= OPCODE_MOV
;
24371 case OPCODE_AND
: /* AND <-> BIC */
24372 new_inst
= OPCODE_BIC
;
24377 new_inst
= OPCODE_AND
;
24381 case OPCODE_ADC
: /* ADC <-> SBC */
24382 new_inst
= OPCODE_SBC
;
24387 new_inst
= OPCODE_ADC
;
24391 /* We cannot do anything. */
24396 if (value
== (unsigned) FAIL
)
24399 *instruction
&= OPCODE_MASK
;
24400 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
24404 /* Like negate_data_op, but for Thumb-2. */
24406 static unsigned int
24407 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
24411 unsigned int negated
, inverted
;
24413 negated
= encode_thumb32_immediate (-value
);
24414 inverted
= encode_thumb32_immediate (~value
);
24416 rd
= (*instruction
>> 8) & 0xf;
24417 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
24420 /* ADD <-> SUB. Includes CMP <-> CMN. */
24421 case T2_OPCODE_SUB
:
24422 new_inst
= T2_OPCODE_ADD
;
24426 case T2_OPCODE_ADD
:
24427 new_inst
= T2_OPCODE_SUB
;
24431 /* ORR <-> ORN. Includes MOV <-> MVN. */
24432 case T2_OPCODE_ORR
:
24433 new_inst
= T2_OPCODE_ORN
;
24437 case T2_OPCODE_ORN
:
24438 new_inst
= T2_OPCODE_ORR
;
24442 /* AND <-> BIC. TST has no inverted equivalent. */
24443 case T2_OPCODE_AND
:
24444 new_inst
= T2_OPCODE_BIC
;
24451 case T2_OPCODE_BIC
:
24452 new_inst
= T2_OPCODE_AND
;
24457 case T2_OPCODE_ADC
:
24458 new_inst
= T2_OPCODE_SBC
;
24462 case T2_OPCODE_SBC
:
24463 new_inst
= T2_OPCODE_ADC
;
24467 /* We cannot do anything. */
24472 if (value
== (unsigned int)FAIL
)
24475 *instruction
&= T2_OPCODE_MASK
;
24476 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
24480 /* Read a 32-bit thumb instruction from buf. */
24482 static unsigned long
24483 get_thumb32_insn (char * buf
)
24485 unsigned long insn
;
24486 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
24487 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24492 /* We usually want to set the low bit on the address of thumb function
24493 symbols. In particular .word foo - . should have the low bit set.
24494 Generic code tries to fold the difference of two symbols to
24495 a constant. Prevent this and force a relocation when the first symbols
24496 is a thumb function. */
24499 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
24501 if (op
== O_subtract
24502 && l
->X_op
== O_symbol
24503 && r
->X_op
== O_symbol
24504 && THUMB_IS_FUNC (l
->X_add_symbol
))
24506 l
->X_op
= O_subtract
;
24507 l
->X_op_symbol
= r
->X_add_symbol
;
24508 l
->X_add_number
-= r
->X_add_number
;
24512 /* Process as normal. */
24516 /* Encode Thumb2 unconditional branches and calls. The encoding
24517 for the 2 are identical for the immediate values. */
24520 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
24522 #define T2I1I2MASK ((1 << 13) | (1 << 11))
24525 addressT S
, I1
, I2
, lo
, hi
;
24527 S
= (value
>> 24) & 0x01;
24528 I1
= (value
>> 23) & 0x01;
24529 I2
= (value
>> 22) & 0x01;
24530 hi
= (value
>> 12) & 0x3ff;
24531 lo
= (value
>> 1) & 0x7ff;
24532 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24533 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24534 newval
|= (S
<< 10) | hi
;
24535 newval2
&= ~T2I1I2MASK
;
24536 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
24537 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24538 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
24542 md_apply_fix (fixS
* fixP
,
24546 offsetT value
= * valP
;
24548 unsigned int newimm
;
24549 unsigned long temp
;
24551 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
24553 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
24555 /* Note whether this will delete the relocation. */
24557 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
24560 /* On a 64-bit host, silently truncate 'value' to 32 bits for
24561 consistency with the behaviour on 32-bit hosts. Remember value
24563 value
&= 0xffffffff;
24564 value
^= 0x80000000;
24565 value
-= 0x80000000;
24568 fixP
->fx_addnumber
= value
;
24570 /* Same treatment for fixP->fx_offset. */
24571 fixP
->fx_offset
&= 0xffffffff;
24572 fixP
->fx_offset
^= 0x80000000;
24573 fixP
->fx_offset
-= 0x80000000;
24575 switch (fixP
->fx_r_type
)
24577 case BFD_RELOC_NONE
:
24578 /* This will need to go in the object file. */
24582 case BFD_RELOC_ARM_IMMEDIATE
:
24583 /* We claim that this fixup has been processed here,
24584 even if in fact we generate an error because we do
24585 not have a reloc for it, so tc_gen_reloc will reject it. */
24588 if (fixP
->fx_addsy
)
24590 const char *msg
= 0;
24592 if (! S_IS_DEFINED (fixP
->fx_addsy
))
24593 msg
= _("undefined symbol %s used as an immediate value");
24594 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
24595 msg
= _("symbol %s is in a different section");
24596 else if (S_IS_WEAK (fixP
->fx_addsy
))
24597 msg
= _("symbol %s is weak and may be overridden later");
24601 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24602 msg
, S_GET_NAME (fixP
->fx_addsy
));
24607 temp
= md_chars_to_number (buf
, INSN_SIZE
);
24609 /* If the offset is negative, we should use encoding A2 for ADR. */
24610 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
24611 newimm
= negate_data_op (&temp
, value
);
24614 newimm
= encode_arm_immediate (value
);
24616 /* If the instruction will fail, see if we can fix things up by
24617 changing the opcode. */
24618 if (newimm
== (unsigned int) FAIL
)
24619 newimm
= negate_data_op (&temp
, value
);
24620 /* MOV accepts both ARM modified immediate (A1 encoding) and
24621 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
24622 When disassembling, MOV is preferred when there is no encoding
24624 if (newimm
== (unsigned int) FAIL
24625 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
24626 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
24627 && !((temp
>> SBIT_SHIFT
) & 0x1)
24628 && value
>= 0 && value
<= 0xffff)
24630 /* Clear bits[23:20] to change encoding from A1 to A2. */
24631 temp
&= 0xff0fffff;
24632 /* Encoding high 4bits imm. Code below will encode the remaining
24634 temp
|= (value
& 0x0000f000) << 4;
24635 newimm
= value
& 0x00000fff;
24639 if (newimm
== (unsigned int) FAIL
)
24641 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24642 _("invalid constant (%lx) after fixup"),
24643 (unsigned long) value
);
24647 newimm
|= (temp
& 0xfffff000);
24648 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
24651 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
24653 unsigned int highpart
= 0;
24654 unsigned int newinsn
= 0xe1a00000; /* nop. */
24656 if (fixP
->fx_addsy
)
24658 const char *msg
= 0;
24660 if (! S_IS_DEFINED (fixP
->fx_addsy
))
24661 msg
= _("undefined symbol %s used as an immediate value");
24662 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
24663 msg
= _("symbol %s is in a different section");
24664 else if (S_IS_WEAK (fixP
->fx_addsy
))
24665 msg
= _("symbol %s is weak and may be overridden later");
24669 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24670 msg
, S_GET_NAME (fixP
->fx_addsy
));
24675 newimm
= encode_arm_immediate (value
);
24676 temp
= md_chars_to_number (buf
, INSN_SIZE
);
24678 /* If the instruction will fail, see if we can fix things up by
24679 changing the opcode. */
24680 if (newimm
== (unsigned int) FAIL
24681 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
24683 /* No ? OK - try using two ADD instructions to generate
24685 newimm
= validate_immediate_twopart (value
, & highpart
);
24687 /* Yes - then make sure that the second instruction is
24689 if (newimm
!= (unsigned int) FAIL
)
24691 /* Still No ? Try using a negated value. */
24692 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
24693 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
24694 /* Otherwise - give up. */
24697 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24698 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
24703 /* Replace the first operand in the 2nd instruction (which
24704 is the PC) with the destination register. We have
24705 already added in the PC in the first instruction and we
24706 do not want to do it again. */
24707 newinsn
&= ~ 0xf0000;
24708 newinsn
|= ((newinsn
& 0x0f000) << 4);
24711 newimm
|= (temp
& 0xfffff000);
24712 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
24714 highpart
|= (newinsn
& 0xfffff000);
24715 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
24719 case BFD_RELOC_ARM_OFFSET_IMM
:
24720 if (!fixP
->fx_done
&& seg
->use_rela_p
)
24722 /* Fall through. */
24724 case BFD_RELOC_ARM_LITERAL
:
24730 if (validate_offset_imm (value
, 0) == FAIL
)
24732 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
24733 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24734 _("invalid literal constant: pool needs to be closer"));
24736 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24737 _("bad immediate value for offset (%ld)"),
24742 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24744 newval
&= 0xfffff000;
24747 newval
&= 0xff7ff000;
24748 newval
|= value
| (sign
? INDEX_UP
: 0);
24750 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24753 case BFD_RELOC_ARM_OFFSET_IMM8
:
24754 case BFD_RELOC_ARM_HWLITERAL
:
24760 if (validate_offset_imm (value
, 1) == FAIL
)
24762 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
24763 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24764 _("invalid literal constant: pool needs to be closer"));
24766 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24767 _("bad immediate value for 8-bit offset (%ld)"),
24772 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24774 newval
&= 0xfffff0f0;
24777 newval
&= 0xff7ff0f0;
24778 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
24780 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24783 case BFD_RELOC_ARM_T32_OFFSET_U8
:
24784 if (value
< 0 || value
> 1020 || value
% 4 != 0)
24785 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24786 _("bad immediate value for offset (%ld)"), (long) value
);
24789 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
24791 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
24794 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
24795 /* This is a complicated relocation used for all varieties of Thumb32
24796 load/store instruction with immediate offset:
24798 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
24799 *4, optional writeback(W)
24800 (doubleword load/store)
24802 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
24803 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
24804 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
24805 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
24806 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
24808 Uppercase letters indicate bits that are already encoded at
24809 this point. Lowercase letters are our problem. For the
24810 second block of instructions, the secondary opcode nybble
24811 (bits 8..11) is present, and bit 23 is zero, even if this is
24812 a PC-relative operation. */
24813 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24815 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
24817 if ((newval
& 0xf0000000) == 0xe0000000)
24819 /* Doubleword load/store: 8-bit offset, scaled by 4. */
24821 newval
|= (1 << 23);
24824 if (value
% 4 != 0)
24826 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24827 _("offset not a multiple of 4"));
24833 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24834 _("offset out of range"));
24839 else if ((newval
& 0x000f0000) == 0x000f0000)
24841 /* PC-relative, 12-bit offset. */
24843 newval
|= (1 << 23);
24848 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24849 _("offset out of range"));
24854 else if ((newval
& 0x00000100) == 0x00000100)
24856 /* Writeback: 8-bit, +/- offset. */
24858 newval
|= (1 << 9);
24863 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24864 _("offset out of range"));
24869 else if ((newval
& 0x00000f00) == 0x00000e00)
24871 /* T-instruction: positive 8-bit offset. */
24872 if (value
< 0 || value
> 0xff)
24874 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24875 _("offset out of range"));
24883 /* Positive 12-bit or negative 8-bit offset. */
24887 newval
|= (1 << 23);
24897 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24898 _("offset out of range"));
24905 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
24906 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
24909 case BFD_RELOC_ARM_SHIFT_IMM
:
24910 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24911 if (((unsigned long) value
) > 32
24913 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
24915 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24916 _("shift expression is too large"));
24921 /* Shifts of zero must be done as lsl. */
24923 else if (value
== 32)
24925 newval
&= 0xfffff07f;
24926 newval
|= (value
& 0x1f) << 7;
24927 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24930 case BFD_RELOC_ARM_T32_IMMEDIATE
:
24931 case BFD_RELOC_ARM_T32_ADD_IMM
:
24932 case BFD_RELOC_ARM_T32_IMM12
:
24933 case BFD_RELOC_ARM_T32_ADD_PC12
:
24934 /* We claim that this fixup has been processed here,
24935 even if in fact we generate an error because we do
24936 not have a reloc for it, so tc_gen_reloc will reject it. */
24940 && ! S_IS_DEFINED (fixP
->fx_addsy
))
24942 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24943 _("undefined symbol %s used as an immediate value"),
24944 S_GET_NAME (fixP
->fx_addsy
));
24948 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24950 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
24953 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24954 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
24955 Thumb2 modified immediate encoding (T2). */
24956 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
24957 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
24959 newimm
= encode_thumb32_immediate (value
);
24960 if (newimm
== (unsigned int) FAIL
)
24961 newimm
= thumb32_negate_data_op (&newval
, value
);
24963 if (newimm
== (unsigned int) FAIL
)
24965 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
24967 /* Turn add/sum into addw/subw. */
24968 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
24969 newval
= (newval
& 0xfeffffff) | 0x02000000;
24970 /* No flat 12-bit imm encoding for addsw/subsw. */
24971 if ((newval
& 0x00100000) == 0)
24973 /* 12 bit immediate for addw/subw. */
24977 newval
^= 0x00a00000;
24980 newimm
= (unsigned int) FAIL
;
24987 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
24988 UINT16 (T3 encoding), MOVW only accepts UINT16. When
24989 disassembling, MOV is preferred when there is no encoding
24991 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
24992 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
24993 but with the Rn field [19:16] set to 1111. */
24994 && (((newval
>> 16) & 0xf) == 0xf)
24995 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
24996 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
24997 && value
>= 0 && value
<= 0xffff)
24999 /* Toggle bit[25] to change encoding from T2 to T3. */
25001 /* Clear bits[19:16]. */
25002 newval
&= 0xfff0ffff;
25003 /* Encoding high 4bits imm. Code below will encode the
25004 remaining low 12bits. */
25005 newval
|= (value
& 0x0000f000) << 4;
25006 newimm
= value
& 0x00000fff;
25011 if (newimm
== (unsigned int)FAIL
)
25013 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25014 _("invalid constant (%lx) after fixup"),
25015 (unsigned long) value
);
25019 newval
|= (newimm
& 0x800) << 15;
25020 newval
|= (newimm
& 0x700) << 4;
25021 newval
|= (newimm
& 0x0ff);
25023 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
25024 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
25027 case BFD_RELOC_ARM_SMC
:
25028 if (((unsigned long) value
) > 0xffff)
25029 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25030 _("invalid smc expression"));
25031 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25032 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
25033 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25036 case BFD_RELOC_ARM_HVC
:
25037 if (((unsigned long) value
) > 0xffff)
25038 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25039 _("invalid hvc expression"));
25040 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25041 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
25042 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25045 case BFD_RELOC_ARM_SWI
:
25046 if (fixP
->tc_fix_data
!= 0)
25048 if (((unsigned long) value
) > 0xff)
25049 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25050 _("invalid swi expression"));
25051 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25053 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25057 if (((unsigned long) value
) > 0x00ffffff)
25058 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25059 _("invalid swi expression"));
25060 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25062 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25066 case BFD_RELOC_ARM_MULTI
:
25067 if (((unsigned long) value
) > 0xffff)
25068 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25069 _("invalid expression in load/store multiple"));
25070 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
25071 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25075 case BFD_RELOC_ARM_PCREL_CALL
:
25077 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25079 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25080 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25081 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25082 /* Flip the bl to blx. This is a simple flip
25083 bit here because we generate PCREL_CALL for
25084 unconditional bls. */
25086 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25087 newval
= newval
| 0x10000000;
25088 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25094 goto arm_branch_common
;
25096 case BFD_RELOC_ARM_PCREL_JUMP
:
25097 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25099 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25100 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25101 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25103 /* This would map to a bl<cond>, b<cond>,
25104 b<always> to a Thumb function. We
25105 need to force a relocation for this particular
25107 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25110 /* Fall through. */
25112 case BFD_RELOC_ARM_PLT32
:
25114 case BFD_RELOC_ARM_PCREL_BRANCH
:
25116 goto arm_branch_common
;
25118 case BFD_RELOC_ARM_PCREL_BLX
:
25121 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25123 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25124 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25125 && ARM_IS_FUNC (fixP
->fx_addsy
))
25127 /* Flip the blx to a bl and warn. */
25128 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
25129 newval
= 0xeb000000;
25130 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
25131 _("blx to '%s' an ARM ISA state function changed to bl"),
25133 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25139 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
25140 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
25144 /* We are going to store value (shifted right by two) in the
25145 instruction, in a 24 bit, signed field. Bits 26 through 32 either
25146 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
25149 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25150 _("misaligned branch destination"));
25151 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
25152 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
25153 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25155 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25157 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25158 newval
|= (value
>> 2) & 0x00ffffff;
25159 /* Set the H bit on BLX instructions. */
25163 newval
|= 0x01000000;
25165 newval
&= ~0x01000000;
25167 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25171 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
25172 /* CBZ can only branch forward. */
25174 /* Attempts to use CBZ to branch to the next instruction
25175 (which, strictly speaking, are prohibited) will be turned into
25178 FIXME: It may be better to remove the instruction completely and
25179 perform relaxation. */
25182 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25183 newval
= 0xbf00; /* NOP encoding T1 */
25184 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25189 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25191 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25193 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25194 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
25195 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25200 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
25201 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
25202 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25204 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25206 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25207 newval
|= (value
& 0x1ff) >> 1;
25208 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25212 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
25213 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
25214 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25216 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25218 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25219 newval
|= (value
& 0xfff) >> 1;
25220 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25224 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25226 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25227 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25228 && ARM_IS_FUNC (fixP
->fx_addsy
)
25229 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
25231 /* Force a relocation for a branch 20 bits wide. */
25234 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
25235 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25236 _("conditional branch out of range"));
25238 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25241 addressT S
, J1
, J2
, lo
, hi
;
25243 S
= (value
& 0x00100000) >> 20;
25244 J2
= (value
& 0x00080000) >> 19;
25245 J1
= (value
& 0x00040000) >> 18;
25246 hi
= (value
& 0x0003f000) >> 12;
25247 lo
= (value
& 0x00000ffe) >> 1;
25249 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25250 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25251 newval
|= (S
<< 10) | hi
;
25252 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
25253 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25254 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25258 case BFD_RELOC_THUMB_PCREL_BLX
:
25259 /* If there is a blx from a thumb state function to
25260 another thumb function flip this to a bl and warn
25264 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25265 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25266 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25268 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
25269 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
25270 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
25272 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25273 newval
= newval
| 0x1000;
25274 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
25275 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25280 goto thumb_bl_common
;
25282 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25283 /* A bl from Thumb state ISA to an internal ARM state function
25284 is converted to a blx. */
25286 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25287 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25288 && ARM_IS_FUNC (fixP
->fx_addsy
)
25289 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
25291 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25292 newval
= newval
& ~0x1000;
25293 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
25294 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
25300 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
25301 /* For a BLX instruction, make sure that the relocation is rounded up
25302 to a word boundary. This follows the semantics of the instruction
25303 which specifies that bit 1 of the target address will come from bit
25304 1 of the base address. */
25305 value
= (value
+ 3) & ~ 3;
25308 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
25309 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
25310 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25313 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
25315 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
25316 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25317 else if ((value
& ~0x1ffffff)
25318 && ((value
& ~0x1ffffff) != ~0x1ffffff))
25319 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25320 _("Thumb2 branch out of range"));
25323 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25324 encode_thumb2_b_bl_offset (buf
, value
);
25328 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25329 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
25330 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25332 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25333 encode_thumb2_b_bl_offset (buf
, value
);
25338 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25343 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25344 md_number_to_chars (buf
, value
, 2);
25348 case BFD_RELOC_ARM_TLS_CALL
:
25349 case BFD_RELOC_ARM_THM_TLS_CALL
:
25350 case BFD_RELOC_ARM_TLS_DESCSEQ
:
25351 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
25352 case BFD_RELOC_ARM_TLS_GOTDESC
:
25353 case BFD_RELOC_ARM_TLS_GD32
:
25354 case BFD_RELOC_ARM_TLS_LE32
:
25355 case BFD_RELOC_ARM_TLS_IE32
:
25356 case BFD_RELOC_ARM_TLS_LDM32
:
25357 case BFD_RELOC_ARM_TLS_LDO32
:
25358 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
25361 /* Same handling as above, but with the arm_fdpic guard. */
25362 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
25363 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
25364 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
25367 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
25371 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25372 _("Relocation supported only in FDPIC mode"));
25376 case BFD_RELOC_ARM_GOT32
:
25377 case BFD_RELOC_ARM_GOTOFF
:
25380 case BFD_RELOC_ARM_GOT_PREL
:
25381 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25382 md_number_to_chars (buf
, value
, 4);
25385 case BFD_RELOC_ARM_TARGET2
:
25386 /* TARGET2 is not partial-inplace, so we need to write the
25387 addend here for REL targets, because it won't be written out
25388 during reloc processing later. */
25389 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25390 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
25393 /* Relocations for FDPIC. */
25394 case BFD_RELOC_ARM_GOTFUNCDESC
:
25395 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
25396 case BFD_RELOC_ARM_FUNCDESC
:
25399 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25400 md_number_to_chars (buf
, 0, 4);
25404 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25405 _("Relocation supported only in FDPIC mode"));
25410 case BFD_RELOC_RVA
:
25412 case BFD_RELOC_ARM_TARGET1
:
25413 case BFD_RELOC_ARM_ROSEGREL32
:
25414 case BFD_RELOC_ARM_SBREL32
:
25415 case BFD_RELOC_32_PCREL
:
25417 case BFD_RELOC_32_SECREL
:
25419 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25421 /* For WinCE we only do this for pcrel fixups. */
25422 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
25424 md_number_to_chars (buf
, value
, 4);
25428 case BFD_RELOC_ARM_PREL31
:
25429 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25431 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
25432 if ((value
^ (value
>> 1)) & 0x40000000)
25434 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25435 _("rel31 relocation overflow"));
25437 newval
|= value
& 0x7fffffff;
25438 md_number_to_chars (buf
, newval
, 4);
25443 case BFD_RELOC_ARM_CP_OFF_IMM
:
25444 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
25445 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
:
25446 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
25447 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25449 newval
= get_thumb32_insn (buf
);
25450 if ((newval
& 0x0f200f00) == 0x0d000900)
25452 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
25453 has permitted values that are multiples of 2, in the range 0
25455 if (value
< -510 || value
> 510 || (value
& 1))
25456 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25457 _("co-processor offset out of range"));
25459 else if ((newval
& 0xfe001f80) == 0xec000f80)
25461 if (value
< -511 || value
> 512 || (value
& 3))
25462 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25463 _("co-processor offset out of range"));
25465 else if (value
< -1023 || value
> 1023 || (value
& 3))
25466 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25467 _("co-processor offset out of range"));
25472 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25473 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
25474 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25476 newval
= get_thumb32_insn (buf
);
25479 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
25480 newval
&= 0xffffff80;
25482 newval
&= 0xffffff00;
25486 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
25487 newval
&= 0xff7fff80;
25489 newval
&= 0xff7fff00;
25490 if ((newval
& 0x0f200f00) == 0x0d000900)
25492 /* This is a fp16 vstr/vldr.
25494 It requires the immediate offset in the instruction is shifted
25495 left by 1 to be a half-word offset.
25497 Here, left shift by 1 first, and later right shift by 2
25498 should get the right offset. */
25501 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
25503 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25504 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
25505 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25507 put_thumb32_insn (buf
, newval
);
25510 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
25511 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
25512 if (value
< -255 || value
> 255)
25513 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25514 _("co-processor offset out of range"));
25516 goto cp_off_common
;
25518 case BFD_RELOC_ARM_THUMB_OFFSET
:
25519 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25520 /* Exactly what ranges, and where the offset is inserted depends
25521 on the type of instruction, we can establish this from the
25523 switch (newval
>> 12)
25525 case 4: /* PC load. */
25526 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
25527 forced to zero for these loads; md_pcrel_from has already
25528 compensated for this. */
25530 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25531 _("invalid offset, target not word aligned (0x%08lX)"),
25532 (((unsigned long) fixP
->fx_frag
->fr_address
25533 + (unsigned long) fixP
->fx_where
) & ~3)
25534 + (unsigned long) value
);
25536 if (value
& ~0x3fc)
25537 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25538 _("invalid offset, value too big (0x%08lX)"),
25541 newval
|= value
>> 2;
25544 case 9: /* SP load/store. */
25545 if (value
& ~0x3fc)
25546 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25547 _("invalid offset, value too big (0x%08lX)"),
25549 newval
|= value
>> 2;
25552 case 6: /* Word load/store. */
25554 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25555 _("invalid offset, value too big (0x%08lX)"),
25557 newval
|= value
<< 4; /* 6 - 2. */
25560 case 7: /* Byte load/store. */
25562 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25563 _("invalid offset, value too big (0x%08lX)"),
25565 newval
|= value
<< 6;
25568 case 8: /* Halfword load/store. */
25570 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25571 _("invalid offset, value too big (0x%08lX)"),
25573 newval
|= value
<< 5; /* 6 - 1. */
25577 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25578 "Unable to process relocation for thumb opcode: %lx",
25579 (unsigned long) newval
);
25582 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25585 case BFD_RELOC_ARM_THUMB_ADD
:
25586 /* This is a complicated relocation, since we use it for all of
25587 the following immediate relocations:
25591 9bit ADD/SUB SP word-aligned
25592 10bit ADD PC/SP word-aligned
25594 The type of instruction being processed is encoded in the
25601 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25603 int rd
= (newval
>> 4) & 0xf;
25604 int rs
= newval
& 0xf;
25605 int subtract
= !!(newval
& 0x8000);
25607 /* Check for HI regs, only very restricted cases allowed:
25608 Adjusting SP, and using PC or SP to get an address. */
25609 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
25610 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
25611 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25612 _("invalid Hi register with immediate"));
25614 /* If value is negative, choose the opposite instruction. */
25618 subtract
= !subtract
;
25620 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25621 _("immediate value out of range"));
25626 if (value
& ~0x1fc)
25627 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25628 _("invalid immediate for stack address calculation"));
25629 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
25630 newval
|= value
>> 2;
25632 else if (rs
== REG_PC
|| rs
== REG_SP
)
25634 /* PR gas/18541. If the addition is for a defined symbol
25635 within range of an ADR instruction then accept it. */
25638 && fixP
->fx_addsy
!= NULL
)
25642 if (! S_IS_DEFINED (fixP
->fx_addsy
)
25643 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
25644 || S_IS_WEAK (fixP
->fx_addsy
))
25646 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25647 _("address calculation needs a strongly defined nearby symbol"));
25651 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
25653 /* Round up to the next 4-byte boundary. */
25658 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
25662 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25663 _("symbol too far away"));
25673 if (subtract
|| value
& ~0x3fc)
25674 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25675 _("invalid immediate for address calculation (value = 0x%08lX)"),
25676 (unsigned long) (subtract
? - value
: value
));
25677 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
25679 newval
|= value
>> 2;
25684 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25685 _("immediate value out of range"));
25686 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
25687 newval
|= (rd
<< 8) | value
;
25692 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25693 _("immediate value out of range"));
25694 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
25695 newval
|= rd
| (rs
<< 3) | (value
<< 6);
25698 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25701 case BFD_RELOC_ARM_THUMB_IMM
:
25702 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25703 if (value
< 0 || value
> 255)
25704 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25705 _("invalid immediate: %ld is out of range"),
25708 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25711 case BFD_RELOC_ARM_THUMB_SHIFT
:
25712 /* 5bit shift value (0..32). LSL cannot take 32. */
25713 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
25714 temp
= newval
& 0xf800;
25715 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
25716 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25717 _("invalid shift value: %ld"), (long) value
);
25718 /* Shifts of zero must be encoded as LSL. */
25720 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
25721 /* Shifts of 32 are encoded as zero. */
25722 else if (value
== 32)
25724 newval
|= value
<< 6;
25725 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25728 case BFD_RELOC_VTABLE_INHERIT
:
25729 case BFD_RELOC_VTABLE_ENTRY
:
25733 case BFD_RELOC_ARM_MOVW
:
25734 case BFD_RELOC_ARM_MOVT
:
25735 case BFD_RELOC_ARM_THUMB_MOVW
:
25736 case BFD_RELOC_ARM_THUMB_MOVT
:
25737 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25739 /* REL format relocations are limited to a 16-bit addend. */
25740 if (!fixP
->fx_done
)
25742 if (value
< -0x8000 || value
> 0x7fff)
25743 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25744 _("offset out of range"));
25746 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
25747 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
25752 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
25753 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
25755 newval
= get_thumb32_insn (buf
);
25756 newval
&= 0xfbf08f00;
25757 newval
|= (value
& 0xf000) << 4;
25758 newval
|= (value
& 0x0800) << 15;
25759 newval
|= (value
& 0x0700) << 4;
25760 newval
|= (value
& 0x00ff);
25761 put_thumb32_insn (buf
, newval
);
25765 newval
= md_chars_to_number (buf
, 4);
25766 newval
&= 0xfff0f000;
25767 newval
|= value
& 0x0fff;
25768 newval
|= (value
& 0xf000) << 4;
25769 md_number_to_chars (buf
, newval
, 4);
25774 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
25775 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
25776 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
25777 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
25778 gas_assert (!fixP
->fx_done
);
25781 bfd_boolean is_mov
;
25782 bfd_vma encoded_addend
= value
;
25784 /* Check that addend can be encoded in instruction. */
25785 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
25786 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25787 _("the offset 0x%08lX is not representable"),
25788 (unsigned long) encoded_addend
);
25790 /* Extract the instruction. */
25791 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
25792 is_mov
= (insn
& 0xf800) == 0x2000;
25797 if (!seg
->use_rela_p
)
25798 insn
|= encoded_addend
;
25804 /* Extract the instruction. */
25805 /* Encoding is the following
25810 /* The following conditions must be true :
25815 rd
= (insn
>> 4) & 0xf;
25817 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
25818 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25819 _("Unable to process relocation for thumb opcode: %lx"),
25820 (unsigned long) insn
);
25822 /* Encode as ADD immediate8 thumb 1 code. */
25823 insn
= 0x3000 | (rd
<< 8);
25825 /* Place the encoded addend into the first 8 bits of the
25827 if (!seg
->use_rela_p
)
25828 insn
|= encoded_addend
;
25831 /* Update the instruction. */
25832 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
25836 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
25837 case BFD_RELOC_ARM_ALU_PC_G0
:
25838 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
25839 case BFD_RELOC_ARM_ALU_PC_G1
:
25840 case BFD_RELOC_ARM_ALU_PC_G2
:
25841 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
25842 case BFD_RELOC_ARM_ALU_SB_G0
:
25843 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
25844 case BFD_RELOC_ARM_ALU_SB_G1
:
25845 case BFD_RELOC_ARM_ALU_SB_G2
:
25846 gas_assert (!fixP
->fx_done
);
25847 if (!seg
->use_rela_p
)
25850 bfd_vma encoded_addend
;
25851 bfd_vma addend_abs
= llabs (value
);
25853 /* Check that the absolute value of the addend can be
25854 expressed as an 8-bit constant plus a rotation. */
25855 encoded_addend
= encode_arm_immediate (addend_abs
);
25856 if (encoded_addend
== (unsigned int) FAIL
)
25857 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25858 _("the offset 0x%08lX is not representable"),
25859 (unsigned long) addend_abs
);
25861 /* Extract the instruction. */
25862 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25864 /* If the addend is positive, use an ADD instruction.
25865 Otherwise use a SUB. Take care not to destroy the S bit. */
25866 insn
&= 0xff1fffff;
25872 /* Place the encoded addend into the first 12 bits of the
25874 insn
&= 0xfffff000;
25875 insn
|= encoded_addend
;
25877 /* Update the instruction. */
25878 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25882 case BFD_RELOC_ARM_LDR_PC_G0
:
25883 case BFD_RELOC_ARM_LDR_PC_G1
:
25884 case BFD_RELOC_ARM_LDR_PC_G2
:
25885 case BFD_RELOC_ARM_LDR_SB_G0
:
25886 case BFD_RELOC_ARM_LDR_SB_G1
:
25887 case BFD_RELOC_ARM_LDR_SB_G2
:
25888 gas_assert (!fixP
->fx_done
);
25889 if (!seg
->use_rela_p
)
25892 bfd_vma addend_abs
= llabs (value
);
25894 /* Check that the absolute value of the addend can be
25895 encoded in 12 bits. */
25896 if (addend_abs
>= 0x1000)
25897 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25898 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
25899 (unsigned long) addend_abs
);
25901 /* Extract the instruction. */
25902 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25904 /* If the addend is negative, clear bit 23 of the instruction.
25905 Otherwise set it. */
25907 insn
&= ~(1 << 23);
25911 /* Place the absolute value of the addend into the first 12 bits
25912 of the instruction. */
25913 insn
&= 0xfffff000;
25914 insn
|= addend_abs
;
25916 /* Update the instruction. */
25917 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25921 case BFD_RELOC_ARM_LDRS_PC_G0
:
25922 case BFD_RELOC_ARM_LDRS_PC_G1
:
25923 case BFD_RELOC_ARM_LDRS_PC_G2
:
25924 case BFD_RELOC_ARM_LDRS_SB_G0
:
25925 case BFD_RELOC_ARM_LDRS_SB_G1
:
25926 case BFD_RELOC_ARM_LDRS_SB_G2
:
25927 gas_assert (!fixP
->fx_done
);
25928 if (!seg
->use_rela_p
)
25931 bfd_vma addend_abs
= llabs (value
);
25933 /* Check that the absolute value of the addend can be
25934 encoded in 8 bits. */
25935 if (addend_abs
>= 0x100)
25936 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25937 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
25938 (unsigned long) addend_abs
);
25940 /* Extract the instruction. */
25941 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25943 /* If the addend is negative, clear bit 23 of the instruction.
25944 Otherwise set it. */
25946 insn
&= ~(1 << 23);
25950 /* Place the first four bits of the absolute value of the addend
25951 into the first 4 bits of the instruction, and the remaining
25952 four into bits 8 .. 11. */
25953 insn
&= 0xfffff0f0;
25954 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
25956 /* Update the instruction. */
25957 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25961 case BFD_RELOC_ARM_LDC_PC_G0
:
25962 case BFD_RELOC_ARM_LDC_PC_G1
:
25963 case BFD_RELOC_ARM_LDC_PC_G2
:
25964 case BFD_RELOC_ARM_LDC_SB_G0
:
25965 case BFD_RELOC_ARM_LDC_SB_G1
:
25966 case BFD_RELOC_ARM_LDC_SB_G2
:
25967 gas_assert (!fixP
->fx_done
);
25968 if (!seg
->use_rela_p
)
25971 bfd_vma addend_abs
= llabs (value
);
25973 /* Check that the absolute value of the addend is a multiple of
25974 four and, when divided by four, fits in 8 bits. */
25975 if (addend_abs
& 0x3)
25976 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25977 _("bad offset 0x%08lX (must be word-aligned)"),
25978 (unsigned long) addend_abs
);
25980 if ((addend_abs
>> 2) > 0xff)
25981 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25982 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
25983 (unsigned long) addend_abs
);
25985 /* Extract the instruction. */
25986 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25988 /* If the addend is negative, clear bit 23 of the instruction.
25989 Otherwise set it. */
25991 insn
&= ~(1 << 23);
25995 /* Place the addend (divided by four) into the first eight
25996 bits of the instruction. */
25997 insn
&= 0xfffffff0;
25998 insn
|= addend_abs
>> 2;
26000 /* Update the instruction. */
26001 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26005 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
26007 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26008 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26009 && ARM_IS_FUNC (fixP
->fx_addsy
)
26010 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26012 /* Force a relocation for a branch 5 bits wide. */
26015 if (v8_1_branch_value_check (value
, 5, FALSE
) == FAIL
)
26016 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26019 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26021 addressT boff
= value
>> 1;
26023 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26024 newval
|= (boff
<< 7);
26025 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26029 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
26031 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26032 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26033 && ARM_IS_FUNC (fixP
->fx_addsy
)
26034 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26038 if ((value
& ~0x7f) && ((value
& ~0x3f) != ~0x3f))
26039 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26040 _("branch out of range"));
26042 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26044 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26046 addressT boff
= ((newval
& 0x0780) >> 7) << 1;
26047 addressT diff
= value
- boff
;
26051 newval
|= 1 << 1; /* T bit. */
26053 else if (diff
!= 2)
26055 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26056 _("out of range label-relative fixup value"));
26058 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26062 case BFD_RELOC_ARM_THUMB_BF17
:
26064 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26065 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26066 && ARM_IS_FUNC (fixP
->fx_addsy
)
26067 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26069 /* Force a relocation for a branch 17 bits wide. */
26073 if (v8_1_branch_value_check (value
, 17, TRUE
) == FAIL
)
26074 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26077 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26080 addressT immA
, immB
, immC
;
26082 immA
= (value
& 0x0001f000) >> 12;
26083 immB
= (value
& 0x00000ffc) >> 2;
26084 immC
= (value
& 0x00000002) >> 1;
26086 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26087 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26089 newval2
|= (immC
<< 11) | (immB
<< 1);
26090 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26091 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26095 case BFD_RELOC_ARM_THUMB_BF19
:
26097 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26098 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26099 && ARM_IS_FUNC (fixP
->fx_addsy
)
26100 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26102 /* Force a relocation for a branch 19 bits wide. */
26106 if (v8_1_branch_value_check (value
, 19, TRUE
) == FAIL
)
26107 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26110 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26113 addressT immA
, immB
, immC
;
26115 immA
= (value
& 0x0007f000) >> 12;
26116 immB
= (value
& 0x00000ffc) >> 2;
26117 immC
= (value
& 0x00000002) >> 1;
26119 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26120 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26122 newval2
|= (immC
<< 11) | (immB
<< 1);
26123 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26124 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26128 case BFD_RELOC_ARM_THUMB_BF13
:
26130 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26131 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26132 && ARM_IS_FUNC (fixP
->fx_addsy
)
26133 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26135 /* Force a relocation for a branch 13 bits wide. */
26139 if (v8_1_branch_value_check (value
, 13, TRUE
) == FAIL
)
26140 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26143 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26146 addressT immA
, immB
, immC
;
26148 immA
= (value
& 0x00001000) >> 12;
26149 immB
= (value
& 0x00000ffc) >> 2;
26150 immC
= (value
& 0x00000002) >> 1;
26152 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26153 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26155 newval2
|= (immC
<< 11) | (immB
<< 1);
26156 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26157 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26161 case BFD_RELOC_ARM_THUMB_LOOP12
:
26163 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26164 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26165 && ARM_IS_FUNC (fixP
->fx_addsy
)
26166 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26168 /* Force a relocation for a branch 12 bits wide. */
26172 bfd_vma insn
= get_thumb32_insn (buf
);
26173 /* le lr, <label> or le <label> */
26174 if (((insn
& 0xffffffff) == 0xf00fc001)
26175 || ((insn
& 0xffffffff) == 0xf02fc001))
26178 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
26179 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26181 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26183 addressT imml
, immh
;
26185 immh
= (value
& 0x00000ffc) >> 2;
26186 imml
= (value
& 0x00000002) >> 1;
26188 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26189 newval
|= (imml
<< 11) | (immh
<< 1);
26190 md_number_to_chars (buf
+ THUMB_SIZE
, newval
, THUMB_SIZE
);
26194 case BFD_RELOC_ARM_V4BX
:
26195 /* This will need to go in the object file. */
26199 case BFD_RELOC_UNUSED
:
26201 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26202 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
26206 /* Translate internal representation of relocation info to BFD target
26210 tc_gen_reloc (asection
*section
, fixS
*fixp
)
26213 bfd_reloc_code_real_type code
;
26215 reloc
= XNEW (arelent
);
26217 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
26218 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
26219 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
26221 if (fixp
->fx_pcrel
)
26223 if (section
->use_rela_p
)
26224 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
26226 fixp
->fx_offset
= reloc
->address
;
26228 reloc
->addend
= fixp
->fx_offset
;
26230 switch (fixp
->fx_r_type
)
26233 if (fixp
->fx_pcrel
)
26235 code
= BFD_RELOC_8_PCREL
;
26238 /* Fall through. */
26241 if (fixp
->fx_pcrel
)
26243 code
= BFD_RELOC_16_PCREL
;
26246 /* Fall through. */
26249 if (fixp
->fx_pcrel
)
26251 code
= BFD_RELOC_32_PCREL
;
26254 /* Fall through. */
26256 case BFD_RELOC_ARM_MOVW
:
26257 if (fixp
->fx_pcrel
)
26259 code
= BFD_RELOC_ARM_MOVW_PCREL
;
26262 /* Fall through. */
26264 case BFD_RELOC_ARM_MOVT
:
26265 if (fixp
->fx_pcrel
)
26267 code
= BFD_RELOC_ARM_MOVT_PCREL
;
26270 /* Fall through. */
26272 case BFD_RELOC_ARM_THUMB_MOVW
:
26273 if (fixp
->fx_pcrel
)
26275 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
26278 /* Fall through. */
26280 case BFD_RELOC_ARM_THUMB_MOVT
:
26281 if (fixp
->fx_pcrel
)
26283 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
26286 /* Fall through. */
26288 case BFD_RELOC_NONE
:
26289 case BFD_RELOC_ARM_PCREL_BRANCH
:
26290 case BFD_RELOC_ARM_PCREL_BLX
:
26291 case BFD_RELOC_RVA
:
26292 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
26293 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
26294 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
26295 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
26296 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26297 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
26298 case BFD_RELOC_VTABLE_ENTRY
:
26299 case BFD_RELOC_VTABLE_INHERIT
:
26301 case BFD_RELOC_32_SECREL
:
26303 code
= fixp
->fx_r_type
;
26306 case BFD_RELOC_THUMB_PCREL_BLX
:
26308 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
26309 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
26312 code
= BFD_RELOC_THUMB_PCREL_BLX
;
26315 case BFD_RELOC_ARM_LITERAL
:
26316 case BFD_RELOC_ARM_HWLITERAL
:
26317 /* If this is called then the a literal has
26318 been referenced across a section boundary. */
26319 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26320 _("literal referenced across section boundary"));
26324 case BFD_RELOC_ARM_TLS_CALL
:
26325 case BFD_RELOC_ARM_THM_TLS_CALL
:
26326 case BFD_RELOC_ARM_TLS_DESCSEQ
:
26327 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
26328 case BFD_RELOC_ARM_GOT32
:
26329 case BFD_RELOC_ARM_GOTOFF
:
26330 case BFD_RELOC_ARM_GOT_PREL
:
26331 case BFD_RELOC_ARM_PLT32
:
26332 case BFD_RELOC_ARM_TARGET1
:
26333 case BFD_RELOC_ARM_ROSEGREL32
:
26334 case BFD_RELOC_ARM_SBREL32
:
26335 case BFD_RELOC_ARM_PREL31
:
26336 case BFD_RELOC_ARM_TARGET2
:
26337 case BFD_RELOC_ARM_TLS_LDO32
:
26338 case BFD_RELOC_ARM_PCREL_CALL
:
26339 case BFD_RELOC_ARM_PCREL_JUMP
:
26340 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
26341 case BFD_RELOC_ARM_ALU_PC_G0
:
26342 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
26343 case BFD_RELOC_ARM_ALU_PC_G1
:
26344 case BFD_RELOC_ARM_ALU_PC_G2
:
26345 case BFD_RELOC_ARM_LDR_PC_G0
:
26346 case BFD_RELOC_ARM_LDR_PC_G1
:
26347 case BFD_RELOC_ARM_LDR_PC_G2
:
26348 case BFD_RELOC_ARM_LDRS_PC_G0
:
26349 case BFD_RELOC_ARM_LDRS_PC_G1
:
26350 case BFD_RELOC_ARM_LDRS_PC_G2
:
26351 case BFD_RELOC_ARM_LDC_PC_G0
:
26352 case BFD_RELOC_ARM_LDC_PC_G1
:
26353 case BFD_RELOC_ARM_LDC_PC_G2
:
26354 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
26355 case BFD_RELOC_ARM_ALU_SB_G0
:
26356 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
26357 case BFD_RELOC_ARM_ALU_SB_G1
:
26358 case BFD_RELOC_ARM_ALU_SB_G2
:
26359 case BFD_RELOC_ARM_LDR_SB_G0
:
26360 case BFD_RELOC_ARM_LDR_SB_G1
:
26361 case BFD_RELOC_ARM_LDR_SB_G2
:
26362 case BFD_RELOC_ARM_LDRS_SB_G0
:
26363 case BFD_RELOC_ARM_LDRS_SB_G1
:
26364 case BFD_RELOC_ARM_LDRS_SB_G2
:
26365 case BFD_RELOC_ARM_LDC_SB_G0
:
26366 case BFD_RELOC_ARM_LDC_SB_G1
:
26367 case BFD_RELOC_ARM_LDC_SB_G2
:
26368 case BFD_RELOC_ARM_V4BX
:
26369 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
26370 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
26371 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
26372 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
26373 case BFD_RELOC_ARM_GOTFUNCDESC
:
26374 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
26375 case BFD_RELOC_ARM_FUNCDESC
:
26376 case BFD_RELOC_ARM_THUMB_BF17
:
26377 case BFD_RELOC_ARM_THUMB_BF19
:
26378 case BFD_RELOC_ARM_THUMB_BF13
:
26379 code
= fixp
->fx_r_type
;
26382 case BFD_RELOC_ARM_TLS_GOTDESC
:
26383 case BFD_RELOC_ARM_TLS_GD32
:
26384 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
26385 case BFD_RELOC_ARM_TLS_LE32
:
26386 case BFD_RELOC_ARM_TLS_IE32
:
26387 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
26388 case BFD_RELOC_ARM_TLS_LDM32
:
26389 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
26390 /* BFD will include the symbol's address in the addend.
26391 But we don't want that, so subtract it out again here. */
26392 if (!S_IS_COMMON (fixp
->fx_addsy
))
26393 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
26394 code
= fixp
->fx_r_type
;
26398 case BFD_RELOC_ARM_IMMEDIATE
:
26399 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26400 _("internal relocation (type: IMMEDIATE) not fixed up"));
26403 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
26404 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26405 _("ADRL used for a symbol not defined in the same file"));
26408 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
26409 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
26410 case BFD_RELOC_ARM_THUMB_LOOP12
:
26411 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26412 _("%s used for a symbol not defined in the same file"),
26413 bfd_get_reloc_code_name (fixp
->fx_r_type
));
26416 case BFD_RELOC_ARM_OFFSET_IMM
:
26417 if (section
->use_rela_p
)
26419 code
= fixp
->fx_r_type
;
26423 if (fixp
->fx_addsy
!= NULL
26424 && !S_IS_DEFINED (fixp
->fx_addsy
)
26425 && S_IS_LOCAL (fixp
->fx_addsy
))
26427 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26428 _("undefined local label `%s'"),
26429 S_GET_NAME (fixp
->fx_addsy
));
26433 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26434 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
26441 switch (fixp
->fx_r_type
)
26443 case BFD_RELOC_NONE
: type
= "NONE"; break;
26444 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
26445 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
26446 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
26447 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
26448 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
26449 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
26450 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
26451 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
26452 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
26453 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
26454 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
26455 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
26456 default: type
= _("<unknown>"); break;
26458 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26459 _("cannot represent %s relocation in this object file format"),
26466 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
26468 && fixp
->fx_addsy
== GOT_symbol
)
26470 code
= BFD_RELOC_ARM_GOTPC
;
26471 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
26475 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
26477 if (reloc
->howto
== NULL
)
26479 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26480 _("cannot represent %s relocation in this object file format"),
26481 bfd_get_reloc_code_name (code
));
26485 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
26486 vtable entry to be used in the relocation's section offset. */
26487 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
26488 reloc
->address
= fixp
->fx_offset
;
26493 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
26496 cons_fix_new_arm (fragS
* frag
,
26500 bfd_reloc_code_real_type reloc
)
26505 FIXME: @@ Should look at CPU word size. */
26509 reloc
= BFD_RELOC_8
;
26512 reloc
= BFD_RELOC_16
;
26516 reloc
= BFD_RELOC_32
;
26519 reloc
= BFD_RELOC_64
;
26524 if (exp
->X_op
== O_secrel
)
26526 exp
->X_op
= O_symbol
;
26527 reloc
= BFD_RELOC_32_SECREL
;
26531 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
26534 #if defined (OBJ_COFF)
26536 arm_validate_fix (fixS
* fixP
)
26538 /* If the destination of the branch is a defined symbol which does not have
26539 the THUMB_FUNC attribute, then we must be calling a function which has
26540 the (interfacearm) attribute. We look for the Thumb entry point to that
26541 function and change the branch to refer to that function instead. */
26542 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
26543 && fixP
->fx_addsy
!= NULL
26544 && S_IS_DEFINED (fixP
->fx_addsy
)
26545 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
26547 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
26554 arm_force_relocation (struct fix
* fixp
)
26556 #if defined (OBJ_COFF) && defined (TE_PE)
26557 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
26561 /* In case we have a call or a branch to a function in ARM ISA mode from
26562 a thumb function or vice-versa force the relocation. These relocations
26563 are cleared off for some cores that might have blx and simple transformations
26567 switch (fixp
->fx_r_type
)
26569 case BFD_RELOC_ARM_PCREL_JUMP
:
26570 case BFD_RELOC_ARM_PCREL_CALL
:
26571 case BFD_RELOC_THUMB_PCREL_BLX
:
26572 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
26576 case BFD_RELOC_ARM_PCREL_BLX
:
26577 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
26578 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
26579 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26580 if (ARM_IS_FUNC (fixp
->fx_addsy
))
26589 /* Resolve these relocations even if the symbol is extern or weak.
26590 Technically this is probably wrong due to symbol preemption.
26591 In practice these relocations do not have enough range to be useful
26592 at dynamic link time, and some code (e.g. in the Linux kernel)
26593 expects these references to be resolved. */
26594 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
26595 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
26596 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
26597 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
26598 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
26599 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
26600 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
26601 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
26602 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
26603 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
26604 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
26605 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
26606 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
26607 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
26610 /* Always leave these relocations for the linker. */
26611 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
26612 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
26613 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
26616 /* Always generate relocations against function symbols. */
26617 if (fixp
->fx_r_type
== BFD_RELOC_32
26619 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
26622 return generic_force_reloc (fixp
);
26625 #if defined (OBJ_ELF) || defined (OBJ_COFF)
26626 /* Relocations against function names must be left unadjusted,
26627 so that the linker can use this information to generate interworking
26628 stubs. The MIPS version of this function
26629 also prevents relocations that are mips-16 specific, but I do not
26630 know why it does this.
26633 There is one other problem that ought to be addressed here, but
26634 which currently is not: Taking the address of a label (rather
26635 than a function) and then later jumping to that address. Such
26636 addresses also ought to have their bottom bit set (assuming that
26637 they reside in Thumb code), but at the moment they will not. */
26640 arm_fix_adjustable (fixS
* fixP
)
26642 if (fixP
->fx_addsy
== NULL
)
26645 /* Preserve relocations against symbols with function type. */
26646 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
26649 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
26650 && fixP
->fx_subsy
== NULL
)
26653 /* We need the symbol name for the VTABLE entries. */
26654 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
26655 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
26658 /* Don't allow symbols to be discarded on GOT related relocs. */
26659 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
26660 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
26661 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
26662 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
26663 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32_FDPIC
26664 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
26665 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
26666 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32_FDPIC
26667 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
26668 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32_FDPIC
26669 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
26670 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
26671 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
26672 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
26673 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
26674 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
26675 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
26678 /* Similarly for group relocations. */
26679 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
26680 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
26681 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
26684 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
26685 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
26686 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
26687 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
26688 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
26689 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
26690 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
26691 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
26692 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
26695 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
26696 offsets, so keep these symbols. */
26697 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
26698 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
26703 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
26707 elf32_arm_target_format (void)
26710 return (target_big_endian
26711 ? "elf32-bigarm-symbian"
26712 : "elf32-littlearm-symbian");
26713 #elif defined (TE_VXWORKS)
26714 return (target_big_endian
26715 ? "elf32-bigarm-vxworks"
26716 : "elf32-littlearm-vxworks");
26717 #elif defined (TE_NACL)
26718 return (target_big_endian
26719 ? "elf32-bigarm-nacl"
26720 : "elf32-littlearm-nacl");
26724 if (target_big_endian
)
26725 return "elf32-bigarm-fdpic";
26727 return "elf32-littlearm-fdpic";
26731 if (target_big_endian
)
26732 return "elf32-bigarm";
26734 return "elf32-littlearm";
26740 armelf_frob_symbol (symbolS
* symp
,
26743 elf_frob_symbol (symp
, puntp
);
26747 /* MD interface: Finalization. */
26752 literal_pool
* pool
;
26754 /* Ensure that all the predication blocks are properly closed. */
26755 check_pred_blocks_finished ();
26757 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
26759 /* Put it at the end of the relevant section. */
26760 subseg_set (pool
->section
, pool
->sub_section
);
26762 arm_elf_change_section ();
26769 /* Remove any excess mapping symbols generated for alignment frags in
26770 SEC. We may have created a mapping symbol before a zero byte
26771 alignment; remove it if there's a mapping symbol after the
26774 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
26775 void *dummy ATTRIBUTE_UNUSED
)
26777 segment_info_type
*seginfo
= seg_info (sec
);
26780 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
26783 for (fragp
= seginfo
->frchainP
->frch_root
;
26785 fragp
= fragp
->fr_next
)
26787 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
26788 fragS
*next
= fragp
->fr_next
;
26790 /* Variable-sized frags have been converted to fixed size by
26791 this point. But if this was variable-sized to start with,
26792 there will be a fixed-size frag after it. So don't handle
26794 if (sym
== NULL
|| next
== NULL
)
26797 if (S_GET_VALUE (sym
) < next
->fr_address
)
26798 /* Not at the end of this frag. */
26800 know (S_GET_VALUE (sym
) == next
->fr_address
);
26804 if (next
->tc_frag_data
.first_map
!= NULL
)
26806 /* Next frag starts with a mapping symbol. Discard this
26808 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
26812 if (next
->fr_next
== NULL
)
26814 /* This mapping symbol is at the end of the section. Discard
26816 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
26817 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
26821 /* As long as we have empty frags without any mapping symbols,
26823 /* If the next frag is non-empty and does not start with a
26824 mapping symbol, then this mapping symbol is required. */
26825 if (next
->fr_address
!= next
->fr_next
->fr_address
)
26828 next
= next
->fr_next
;
26830 while (next
!= NULL
);
26835 /* Adjust the symbol table. This marks Thumb symbols as distinct from
26839 arm_adjust_symtab (void)
26844 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
26846 if (ARM_IS_THUMB (sym
))
26848 if (THUMB_IS_FUNC (sym
))
26850 /* Mark the symbol as a Thumb function. */
26851 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
26852 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
26853 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
26855 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
26856 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
26858 as_bad (_("%s: unexpected function type: %d"),
26859 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
26861 else switch (S_GET_STORAGE_CLASS (sym
))
26864 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
26867 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
26870 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
26878 if (ARM_IS_INTERWORK (sym
))
26879 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
26886 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
26888 if (ARM_IS_THUMB (sym
))
26890 elf_symbol_type
* elf_sym
;
26892 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
26893 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
26895 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
26896 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
26898 /* If it's a .thumb_func, declare it as so,
26899 otherwise tag label as .code 16. */
26900 if (THUMB_IS_FUNC (sym
))
26901 ARM_SET_SYM_BRANCH_TYPE (elf_sym
->internal_elf_sym
.st_target_internal
,
26902 ST_BRANCH_TO_THUMB
);
26903 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
26904 elf_sym
->internal_elf_sym
.st_info
=
26905 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
26910 /* Remove any overlapping mapping symbols generated by alignment frags. */
26911 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
26912 /* Now do generic ELF adjustments. */
26913 elf_adjust_symtab ();
26917 /* MD interface: Initialization. */
26920 set_constant_flonums (void)
26924 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
26925 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
26929 /* Auto-select Thumb mode if it's the only available instruction set for the
26930 given architecture. */
26933 autoselect_thumb_from_cpu_variant (void)
26935 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
26936 opcode_select (16);
26945 if ( (arm_ops_hsh
= hash_new ()) == NULL
26946 || (arm_cond_hsh
= hash_new ()) == NULL
26947 || (arm_vcond_hsh
= hash_new ()) == NULL
26948 || (arm_shift_hsh
= hash_new ()) == NULL
26949 || (arm_psr_hsh
= hash_new ()) == NULL
26950 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
26951 || (arm_reg_hsh
= hash_new ()) == NULL
26952 || (arm_reloc_hsh
= hash_new ()) == NULL
26953 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
26954 as_fatal (_("virtual memory exhausted"));
26956 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
26957 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
26958 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
26959 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
26960 for (i
= 0; i
< sizeof (vconds
) / sizeof (struct asm_cond
); i
++)
26961 hash_insert (arm_vcond_hsh
, vconds
[i
].template_name
, (void *) (vconds
+ i
));
26962 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
26963 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
26964 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
26965 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
26966 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
26967 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
26968 (void *) (v7m_psrs
+ i
));
26969 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
26970 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
26972 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
26974 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
26975 (void *) (barrier_opt_names
+ i
));
26977 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
26979 struct reloc_entry
* entry
= reloc_names
+ i
;
26981 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
26982 /* This makes encode_branch() use the EABI versions of this relocation. */
26983 entry
->reloc
= BFD_RELOC_UNUSED
;
26985 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
26989 set_constant_flonums ();
26991 /* Set the cpu variant based on the command-line options. We prefer
26992 -mcpu= over -march= if both are set (as for GCC); and we prefer
26993 -mfpu= over any other way of setting the floating point unit.
26994 Use of legacy options with new options are faulted. */
26997 if (mcpu_cpu_opt
|| march_cpu_opt
)
26998 as_bad (_("use of old and new-style options to set CPU type"));
27000 selected_arch
= *legacy_cpu
;
27002 else if (mcpu_cpu_opt
)
27004 selected_arch
= *mcpu_cpu_opt
;
27005 selected_ext
= *mcpu_ext_opt
;
27007 else if (march_cpu_opt
)
27009 selected_arch
= *march_cpu_opt
;
27010 selected_ext
= *march_ext_opt
;
27012 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
27017 as_bad (_("use of old and new-style options to set FPU type"));
27019 selected_fpu
= *legacy_fpu
;
27022 selected_fpu
= *mfpu_opt
;
27025 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
27026 || defined (TE_NetBSD) || defined (TE_VXWORKS))
27027 /* Some environments specify a default FPU. If they don't, infer it
27028 from the processor. */
27030 selected_fpu
= *mcpu_fpu_opt
;
27031 else if (march_fpu_opt
)
27032 selected_fpu
= *march_fpu_opt
;
27034 selected_fpu
= fpu_default
;
27038 if (ARM_FEATURE_ZERO (selected_fpu
))
27040 if (!no_cpu_selected ())
27041 selected_fpu
= fpu_default
;
27043 selected_fpu
= fpu_arch_fpa
;
27047 if (ARM_FEATURE_ZERO (selected_arch
))
27049 selected_arch
= cpu_default
;
27050 selected_cpu
= selected_arch
;
27052 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
27054 /* Autodection of feature mode: allow all features in cpu_variant but leave
27055 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
27056 after all instruction have been processed and we can decide what CPU
27057 should be selected. */
27058 if (ARM_FEATURE_ZERO (selected_arch
))
27059 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
27061 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
27064 autoselect_thumb_from_cpu_variant ();
27066 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
27068 #if defined OBJ_COFF || defined OBJ_ELF
27070 unsigned int flags
= 0;
27072 #if defined OBJ_ELF
27073 flags
= meabi_flags
;
27075 switch (meabi_flags
)
27077 case EF_ARM_EABI_UNKNOWN
:
27079 /* Set the flags in the private structure. */
27080 if (uses_apcs_26
) flags
|= F_APCS26
;
27081 if (support_interwork
) flags
|= F_INTERWORK
;
27082 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
27083 if (pic_code
) flags
|= F_PIC
;
27084 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
27085 flags
|= F_SOFT_FLOAT
;
27087 switch (mfloat_abi_opt
)
27089 case ARM_FLOAT_ABI_SOFT
:
27090 case ARM_FLOAT_ABI_SOFTFP
:
27091 flags
|= F_SOFT_FLOAT
;
27094 case ARM_FLOAT_ABI_HARD
:
27095 if (flags
& F_SOFT_FLOAT
)
27096 as_bad (_("hard-float conflicts with specified fpu"));
27100 /* Using pure-endian doubles (even if soft-float). */
27101 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
27102 flags
|= F_VFP_FLOAT
;
27104 #if defined OBJ_ELF
27105 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
27106 flags
|= EF_ARM_MAVERICK_FLOAT
;
27109 case EF_ARM_EABI_VER4
:
27110 case EF_ARM_EABI_VER5
:
27111 /* No additional flags to set. */
27118 bfd_set_private_flags (stdoutput
, flags
);
27120 /* We have run out of flags in the COFF header to encode the
27121 status of ATPCS support, so instead we create a dummy,
27122 empty, debug section called .arm.atpcs. */
27127 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
27131 bfd_set_section_flags
27132 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
27133 bfd_set_section_size (stdoutput
, sec
, 0);
27134 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
27140 /* Record the CPU type as well. */
27141 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
27142 mach
= bfd_mach_arm_iWMMXt2
;
27143 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
27144 mach
= bfd_mach_arm_iWMMXt
;
27145 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
27146 mach
= bfd_mach_arm_XScale
;
27147 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
27148 mach
= bfd_mach_arm_ep9312
;
27149 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
27150 mach
= bfd_mach_arm_5TE
;
27151 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
27153 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
27154 mach
= bfd_mach_arm_5T
;
27156 mach
= bfd_mach_arm_5
;
27158 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
27160 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
27161 mach
= bfd_mach_arm_4T
;
27163 mach
= bfd_mach_arm_4
;
27165 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
27166 mach
= bfd_mach_arm_3M
;
27167 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
27168 mach
= bfd_mach_arm_3
;
27169 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
27170 mach
= bfd_mach_arm_2a
;
27171 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
27172 mach
= bfd_mach_arm_2
;
27174 mach
= bfd_mach_arm_unknown
;
27176 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
27179 /* Command line processing. */
27182 Invocation line includes a switch not recognized by the base assembler.
27183 See if it's a processor-specific option.
27185 This routine is somewhat complicated by the need for backwards
27186 compatibility (since older releases of gcc can't be changed).
27187 The new options try to make the interface as compatible as
27190 New options (supported) are:
27192 -mcpu=<cpu name> Assemble for selected processor
27193 -march=<architecture name> Assemble for selected architecture
27194 -mfpu=<fpu architecture> Assemble for selected FPU.
27195 -EB/-mbig-endian Big-endian
27196 -EL/-mlittle-endian Little-endian
27197 -k Generate PIC code
27198 -mthumb Start in Thumb mode
27199 -mthumb-interwork Code supports ARM/Thumb interworking
27201 -m[no-]warn-deprecated Warn about deprecated features
27202 -m[no-]warn-syms Warn when symbols match instructions
27204 For now we will also provide support for:
27206 -mapcs-32 32-bit Program counter
27207 -mapcs-26 26-bit Program counter
27208 -mapcs-float Floats passed in FP registers
27209 -mapcs-reentrant Reentrant code
27211 (sometimes these will probably be replaced with -mapcs=<list of options>
27212 and -matpcs=<list of options>)
27214 The remaining options are only supported for back-wards compatibility.
27215 Cpu variants, the arm part is optional:
27216 -m[arm]1 Currently not supported.
27217 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
27218 -m[arm]3 Arm 3 processor
27219 -m[arm]6[xx], Arm 6 processors
27220 -m[arm]7[xx][t][[d]m] Arm 7 processors
27221 -m[arm]8[10] Arm 8 processors
27222 -m[arm]9[20][tdmi] Arm 9 processors
27223 -mstrongarm[110[0]] StrongARM processors
27224 -mxscale XScale processors
27225 -m[arm]v[2345[t[e]]] Arm architectures
27226 -mall All (except the ARM1)
27228 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
27229 -mfpe-old (No float load/store multiples)
27230 -mvfpxd VFP Single precision
27232 -mno-fpu Disable all floating point instructions
27234 The following CPU names are recognized:
27235 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
27236 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
27237 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
27238 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
27239 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
27240 arm10t arm10e, arm1020t, arm1020e, arm10200e,
27241 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
/* Short command-line options recognized by this back end, in getopt
   syntax: -m takes an argument (machine/CPU selection), -k is a flag
   (generate PIC code -- see arm_opts).  The extraction had embedded a
   stray line-number token that broke the declaration; this restores a
   syntactically valid definition with the same initializer.  */
const char * md_shortopts = "m:k";
27247 #ifdef ARM_BI_ENDIAN
27248 #define OPTION_EB (OPTION_MD_BASE + 0)
27249 #define OPTION_EL (OPTION_MD_BASE + 1)
27251 #if TARGET_BYTES_BIG_ENDIAN
27252 #define OPTION_EB (OPTION_MD_BASE + 0)
27254 #define OPTION_EL (OPTION_MD_BASE + 1)
27257 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
27258 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
27260 struct option md_longopts
[] =
27263 {"EB", no_argument
, NULL
, OPTION_EB
},
27266 {"EL", no_argument
, NULL
, OPTION_EL
},
27268 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
27270 {"fdpic", no_argument
, NULL
, OPTION_FDPIC
},
27272 {NULL
, no_argument
, NULL
, 0}
27275 size_t md_longopts_size
= sizeof (md_longopts
);
/* Describes a simple binary command-line option: when OPTION is seen,
   *VAR is set to VALUE.  Used for the arm_opts table below.  The
   extraction dropped the struct braces; this restores a valid
   declaration with the fields exactly as listed in the original.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int *        var;		/* Variable to change.  */
  int	       value;		/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
27286 struct arm_option_table arm_opts
[] =
27288 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
27289 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
27290 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
27291 &support_interwork
, 1, NULL
},
27292 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
27293 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
27294 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
27296 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
27297 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
27298 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
27299 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
27302 /* These are recognized by the assembler, but have no effect on code. */
27303 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
27304 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
27306 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
27307 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
27308 &warn_on_deprecated
, 0, NULL
},
27309 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
27310 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
27311 {NULL
, NULL
, NULL
, 0, NULL
}
27314 struct arm_legacy_option_table
27316 const char * option
; /* Option name to match. */
27317 const arm_feature_set
** var
; /* Variable to change. */
27318 const arm_feature_set value
; /* What to change it to. */
27319 const char * deprecated
; /* If non-null, print this message. */
27322 const struct arm_legacy_option_table arm_legacy_opts
[] =
27324 /* DON'T add any new processors to this list -- we want the whole list
27325 to go away... Add them to the processors table instead. */
27326 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
27327 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
27328 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
27329 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
27330 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
27331 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
27332 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
27333 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
27334 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
27335 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
27336 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
27337 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
27338 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
27339 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
27340 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
27341 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
27342 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
27343 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
27344 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
27345 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
27346 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
27347 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
27348 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
27349 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
27350 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
27351 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
27352 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
27353 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
27354 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
27355 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
27356 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
27357 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
27358 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
27359 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
27360 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
27361 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
27362 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
27363 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
27364 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
27365 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
27366 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
27367 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
27368 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
27369 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
27370 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
27371 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
27372 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27373 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27374 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27375 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27376 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
27377 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
27378 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
27379 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
27380 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
27381 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
27382 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
27383 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
27384 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
27385 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
27386 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
27387 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
27388 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
27389 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
27390 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
27391 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
27392 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
27393 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
27394 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
27395 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
27396 N_("use -mcpu=strongarm110")},
27397 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
27398 N_("use -mcpu=strongarm1100")},
27399 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
27400 N_("use -mcpu=strongarm1110")},
27401 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
27402 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
27403 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
27405 /* Architecture variants -- don't add any more to this list either. */
27406 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
27407 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
27408 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
27409 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
27410 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
27411 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
27412 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
27413 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
27414 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
27415 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
27416 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
27417 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
27418 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
27419 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
27420 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
27421 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
27422 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
27423 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
27425 /* Floating point variants -- don't add any more to this list either. */
27426 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
27427 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
27428 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
27429 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
27430 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
27432 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
27435 struct arm_cpu_option_table
27439 const arm_feature_set value
;
27440 const arm_feature_set ext
;
27441 /* For some CPUs we assume an FPU unless the user explicitly sets
27443 const arm_feature_set default_fpu
;
27444 /* The canonical name of the CPU, or NULL to use NAME converted to upper
27446 const char * canonical_name
;
27449 /* This list should, at a minimum, contain all the cpu names
27450 recognized by GCC. */
27451 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
27453 static const struct arm_cpu_option_table arm_cpus
[] =
27455 ARM_CPU_OPT ("all", NULL
, ARM_ANY
,
27458 ARM_CPU_OPT ("arm1", NULL
, ARM_ARCH_V1
,
27461 ARM_CPU_OPT ("arm2", NULL
, ARM_ARCH_V2
,
27464 ARM_CPU_OPT ("arm250", NULL
, ARM_ARCH_V2S
,
27467 ARM_CPU_OPT ("arm3", NULL
, ARM_ARCH_V2S
,
27470 ARM_CPU_OPT ("arm6", NULL
, ARM_ARCH_V3
,
27473 ARM_CPU_OPT ("arm60", NULL
, ARM_ARCH_V3
,
27476 ARM_CPU_OPT ("arm600", NULL
, ARM_ARCH_V3
,
27479 ARM_CPU_OPT ("arm610", NULL
, ARM_ARCH_V3
,
27482 ARM_CPU_OPT ("arm620", NULL
, ARM_ARCH_V3
,
27485 ARM_CPU_OPT ("arm7", NULL
, ARM_ARCH_V3
,
27488 ARM_CPU_OPT ("arm7m", NULL
, ARM_ARCH_V3M
,
27491 ARM_CPU_OPT ("arm7d", NULL
, ARM_ARCH_V3
,
27494 ARM_CPU_OPT ("arm7dm", NULL
, ARM_ARCH_V3M
,
27497 ARM_CPU_OPT ("arm7di", NULL
, ARM_ARCH_V3
,
27500 ARM_CPU_OPT ("arm7dmi", NULL
, ARM_ARCH_V3M
,
27503 ARM_CPU_OPT ("arm70", NULL
, ARM_ARCH_V3
,
27506 ARM_CPU_OPT ("arm700", NULL
, ARM_ARCH_V3
,
27509 ARM_CPU_OPT ("arm700i", NULL
, ARM_ARCH_V3
,
27512 ARM_CPU_OPT ("arm710", NULL
, ARM_ARCH_V3
,
27515 ARM_CPU_OPT ("arm710t", NULL
, ARM_ARCH_V4T
,
27518 ARM_CPU_OPT ("arm720", NULL
, ARM_ARCH_V3
,
27521 ARM_CPU_OPT ("arm720t", NULL
, ARM_ARCH_V4T
,
27524 ARM_CPU_OPT ("arm740t", NULL
, ARM_ARCH_V4T
,
27527 ARM_CPU_OPT ("arm710c", NULL
, ARM_ARCH_V3
,
27530 ARM_CPU_OPT ("arm7100", NULL
, ARM_ARCH_V3
,
27533 ARM_CPU_OPT ("arm7500", NULL
, ARM_ARCH_V3
,
27536 ARM_CPU_OPT ("arm7500fe", NULL
, ARM_ARCH_V3
,
27539 ARM_CPU_OPT ("arm7t", NULL
, ARM_ARCH_V4T
,
27542 ARM_CPU_OPT ("arm7tdmi", NULL
, ARM_ARCH_V4T
,
27545 ARM_CPU_OPT ("arm7tdmi-s", NULL
, ARM_ARCH_V4T
,
27548 ARM_CPU_OPT ("arm8", NULL
, ARM_ARCH_V4
,
27551 ARM_CPU_OPT ("arm810", NULL
, ARM_ARCH_V4
,
27554 ARM_CPU_OPT ("strongarm", NULL
, ARM_ARCH_V4
,
27557 ARM_CPU_OPT ("strongarm1", NULL
, ARM_ARCH_V4
,
27560 ARM_CPU_OPT ("strongarm110", NULL
, ARM_ARCH_V4
,
27563 ARM_CPU_OPT ("strongarm1100", NULL
, ARM_ARCH_V4
,
27566 ARM_CPU_OPT ("strongarm1110", NULL
, ARM_ARCH_V4
,
27569 ARM_CPU_OPT ("arm9", NULL
, ARM_ARCH_V4T
,
27572 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T
,
27575 ARM_CPU_OPT ("arm920t", NULL
, ARM_ARCH_V4T
,
27578 ARM_CPU_OPT ("arm922t", NULL
, ARM_ARCH_V4T
,
27581 ARM_CPU_OPT ("arm940t", NULL
, ARM_ARCH_V4T
,
27584 ARM_CPU_OPT ("arm9tdmi", NULL
, ARM_ARCH_V4T
,
27587 ARM_CPU_OPT ("fa526", NULL
, ARM_ARCH_V4
,
27590 ARM_CPU_OPT ("fa626", NULL
, ARM_ARCH_V4
,
27594 /* For V5 or later processors we default to using VFP; but the user
27595 should really set the FPU type explicitly. */
27596 ARM_CPU_OPT ("arm9e-r0", NULL
, ARM_ARCH_V5TExP
,
27599 ARM_CPU_OPT ("arm9e", NULL
, ARM_ARCH_V5TE
,
27602 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
27605 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
27608 ARM_CPU_OPT ("arm926ej-s", NULL
, ARM_ARCH_V5TEJ
,
27611 ARM_CPU_OPT ("arm946e-r0", NULL
, ARM_ARCH_V5TExP
,
27614 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE
,
27617 ARM_CPU_OPT ("arm946e-s", NULL
, ARM_ARCH_V5TE
,
27620 ARM_CPU_OPT ("arm966e-r0", NULL
, ARM_ARCH_V5TExP
,
27623 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE
,
27626 ARM_CPU_OPT ("arm966e-s", NULL
, ARM_ARCH_V5TE
,
27629 ARM_CPU_OPT ("arm968e-s", NULL
, ARM_ARCH_V5TE
,
27632 ARM_CPU_OPT ("arm10t", NULL
, ARM_ARCH_V5T
,
27635 ARM_CPU_OPT ("arm10tdmi", NULL
, ARM_ARCH_V5T
,
27638 ARM_CPU_OPT ("arm10e", NULL
, ARM_ARCH_V5TE
,
27641 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE
,
27644 ARM_CPU_OPT ("arm1020t", NULL
, ARM_ARCH_V5T
,
27647 ARM_CPU_OPT ("arm1020e", NULL
, ARM_ARCH_V5TE
,
27650 ARM_CPU_OPT ("arm1022e", NULL
, ARM_ARCH_V5TE
,
27653 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ
,
27656 ARM_CPU_OPT ("arm1026ej-s", NULL
, ARM_ARCH_V5TEJ
,
27659 ARM_CPU_OPT ("fa606te", NULL
, ARM_ARCH_V5TE
,
27662 ARM_CPU_OPT ("fa616te", NULL
, ARM_ARCH_V5TE
,
27665 ARM_CPU_OPT ("fa626te", NULL
, ARM_ARCH_V5TE
,
27668 ARM_CPU_OPT ("fmp626", NULL
, ARM_ARCH_V5TE
,
27671 ARM_CPU_OPT ("fa726te", NULL
, ARM_ARCH_V5TE
,
27674 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6
,
27677 ARM_CPU_OPT ("arm1136j-s", NULL
, ARM_ARCH_V6
,
27680 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6
,
27683 ARM_CPU_OPT ("arm1136jf-s", NULL
, ARM_ARCH_V6
,
27686 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K
,
27689 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K
,
27692 ARM_CPU_OPT ("arm1156t2-s", NULL
, ARM_ARCH_V6T2
,
27695 ARM_CPU_OPT ("arm1156t2f-s", NULL
, ARM_ARCH_V6T2
,
27698 ARM_CPU_OPT ("arm1176jz-s", NULL
, ARM_ARCH_V6KZ
,
27701 ARM_CPU_OPT ("arm1176jzf-s", NULL
, ARM_ARCH_V6KZ
,
27704 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A
,
27705 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27707 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE
,
27709 FPU_ARCH_NEON_VFP_V4
),
27710 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A
,
27711 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
27712 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
27713 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A
,
27714 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27715 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
27716 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE
,
27718 FPU_ARCH_NEON_VFP_V4
),
27719 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE
,
27721 FPU_ARCH_NEON_VFP_V4
),
27722 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE
,
27724 FPU_ARCH_NEON_VFP_V4
),
27725 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A
,
27726 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27727 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27728 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A
,
27729 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27730 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27731 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A
,
27732 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27733 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27734 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A
,
27735 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27736 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27737 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A
,
27738 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27739 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27740 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A
,
27741 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27742 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27743 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A
,
27744 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27745 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27746 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A
,
27747 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27748 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27749 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A
,
27750 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27751 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27752 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A
,
27753 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27754 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27755 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R
,
27758 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R
,
27760 FPU_ARCH_VFP_V3D16
),
27761 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R
,
27762 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
27764 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R
,
27765 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
27766 FPU_ARCH_VFP_V3D16
),
27767 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R
,
27768 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
27769 FPU_ARCH_VFP_V3D16
),
27770 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R
,
27771 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27772 FPU_ARCH_NEON_VFP_ARMV8
),
27773 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN
,
27774 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27776 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE
,
27779 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM
,
27782 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM
,
27785 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M
,
27788 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM
,
27791 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM
,
27794 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM
,
27797 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A
,
27798 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27799 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27800 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A
,
27801 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27802 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27803 /* ??? XSCALE is really an architecture. */
27804 ARM_CPU_OPT ("xscale", NULL
, ARM_ARCH_XSCALE
,
27808 /* ??? iwmmxt is not a processor. */
27809 ARM_CPU_OPT ("iwmmxt", NULL
, ARM_ARCH_IWMMXT
,
27812 ARM_CPU_OPT ("iwmmxt2", NULL
, ARM_ARCH_IWMMXT2
,
27815 ARM_CPU_OPT ("i80200", NULL
, ARM_ARCH_XSCALE
,
27820 ARM_CPU_OPT ("ep9312", "ARM920T",
27821 ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
27822 ARM_ARCH_NONE
, FPU_ARCH_MAVERICK
),
27824 /* Marvell processors. */
27825 ARM_CPU_OPT ("marvell-pj4", NULL
, ARM_ARCH_V7A
,
27826 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27827 FPU_ARCH_VFP_V3D16
),
27828 ARM_CPU_OPT ("marvell-whitney", NULL
, ARM_ARCH_V7A
,
27829 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27830 FPU_ARCH_NEON_VFP_V4
),
27832 /* APM X-Gene family. */
27833 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A
,
27835 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27836 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A
,
27837 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27838 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27840 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
27844 struct arm_ext_table
27848 const arm_feature_set merge
;
27849 const arm_feature_set clear
;
27852 struct arm_arch_option_table
27856 const arm_feature_set value
;
27857 const arm_feature_set default_fpu
;
27858 const struct arm_ext_table
* ext_table
;
27861 /* Used to add support for +E and +noE extension. */
27862 #define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
27863 /* Used to add support for a +E extension. */
27864 #define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
27865 /* Used to add support for a +noE extension. */
27866 #define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }
27868 #define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
27869 ~0 & ~FPU_ENDIAN_PURE)
27871 static const struct arm_ext_table armv5te_ext_table
[] =
27873 ARM_EXT ("fp", FPU_ARCH_VFP_V2
, ALL_FP
),
27874 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27877 static const struct arm_ext_table armv7_ext_table
[] =
27879 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27880 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27883 static const struct arm_ext_table armv7ve_ext_table
[] =
27885 ARM_EXT ("fp", FPU_ARCH_VFP_V4D16
, ALL_FP
),
27886 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
),
27887 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
27888 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27889 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
27890 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
), /* Alias for +fp. */
27891 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
27893 ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4
,
27894 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
27896 /* Aliases for +simd. */
27897 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
27899 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27900 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27901 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
27903 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27906 static const struct arm_ext_table armv7a_ext_table
[] =
27908 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27909 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
27910 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
27911 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27912 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
27913 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
),
27914 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
27916 ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1
,
27917 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
27919 /* Aliases for +simd. */
27920 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27921 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27923 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
27924 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
27926 ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
)),
27927 ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
)),
27928 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27931 static const struct arm_ext_table armv7r_ext_table
[] =
27933 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD
),
27934 ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD
), /* Alias for +fp.sp. */
27935 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27936 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
27937 ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
),
27938 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27939 ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27940 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
)),
27941 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27944 static const struct arm_ext_table armv7em_ext_table
[] =
27946 ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16
, ALL_FP
),
27947 /* Alias for +fp, used to be known as fpv4-sp-d16. */
27948 ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
),
27949 ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16
),
27950 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
27951 ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16
),
27952 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27955 static const struct arm_ext_table armv8a_ext_table
[] =
27957 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
27958 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
27959 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27960 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27962 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27963 should use the +simd option to turn on FP. */
27964 ARM_REMOVE ("fp", ALL_FP
),
27965 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27966 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27967 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27971 static const struct arm_ext_table armv81a_ext_table
[] =
27973 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
27974 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
27975 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27977 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27978 should use the +simd option to turn on FP. */
27979 ARM_REMOVE ("fp", ALL_FP
),
27980 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27981 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27982 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27985 static const struct arm_ext_table armv82a_ext_table
[] =
27987 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
27988 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16
),
27989 ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML
),
27990 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
27991 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27992 ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27994 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27995 should use the +simd option to turn on FP. */
27996 ARM_REMOVE ("fp", ALL_FP
),
27997 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27998 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27999 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28002 static const struct arm_ext_table armv84a_ext_table
[] =
28004 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
28005 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
28006 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
28007 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28009 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28010 should use the +simd option to turn on FP. */
28011 ARM_REMOVE ("fp", ALL_FP
),
28012 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28013 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28014 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28017 static const struct arm_ext_table armv85a_ext_table
[] =
28019 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
28020 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
28021 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
28022 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28024 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28025 should use the +simd option to turn on FP. */
28026 ARM_REMOVE ("fp", ALL_FP
),
28027 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28030 static const struct arm_ext_table armv8m_main_ext_table
[] =
28032 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28033 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
28034 ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16
, ALL_FP
),
28035 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
28036 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28039 static const struct arm_ext_table armv8_1m_main_ext_table
[] =
28041 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28042 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
28044 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28045 FPU_VFP_V5_SP_D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
),
28048 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28049 FPU_VFP_V5D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
28050 ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE
),
28051 ARM_FEATURE_COPROC (FPU_MVE
| FPU_MVE_FP
)),
28053 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28054 FPU_MVE
| FPU_MVE_FP
| FPU_VFP_V5_SP_D16
|
28055 FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
28056 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28059 static const struct arm_ext_table armv8r_ext_table
[] =
28061 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
28062 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
28063 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28064 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28065 ARM_REMOVE ("fp", ALL_FP
),
28066 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16
),
28067 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28070 /* This list should, at a minimum, contain all the architecture names
28071 recognized by GCC. */
28072 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
28073 #define ARM_ARCH_OPT2(N, V, DF, ext) \
28074 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
28076 static const struct arm_arch_option_table arm_archs
[] =
28078 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
28079 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
28080 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
28081 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
28082 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
28083 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
28084 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
28085 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
28086 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
28087 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
28088 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
28089 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
28090 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
28091 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
28092 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
, armv5te
),
28093 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
, armv5te
),
28094 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
, armv5te
),
28095 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
28096 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
28097 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
, armv5te
),
28098 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
, armv5te
),
28099 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
28100 kept to preserve existing behaviour. */
28101 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
28102 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
28103 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
, armv5te
),
28104 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
, armv5te
),
28105 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
, armv5te
),
28106 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
28107 kept to preserve existing behaviour. */
28108 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
28109 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
28110 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
28111 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
28112 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
, armv7
),
28113 /* The official spelling of the ARMv7 profile variants is the dashed form.
28114 Accept the non-dashed form for compatibility with old toolchains. */
28115 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
28116 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
, armv7ve
),
28117 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
28118 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
28119 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
28120 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
28121 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
28122 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
, armv7em
),
28123 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
28124 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
,
28126 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN
, FPU_ARCH_VFP
,
28128 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
, armv8a
),
28129 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
, armv81a
),
28130 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
, armv82a
),
28131 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A
, FPU_ARCH_VFP
, armv82a
),
28132 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R
, FPU_ARCH_VFP
, armv8r
),
28133 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A
, FPU_ARCH_VFP
, armv84a
),
28134 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A
, FPU_ARCH_VFP
, armv85a
),
28135 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
28136 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
28137 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
, FPU_ARCH_VFP
),
28138 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
28140 #undef ARM_ARCH_OPT
28142 /* ISA extensions in the co-processor and main instruction set space. */
28144 struct arm_option_extension_value_table
28148 const arm_feature_set merge_value
;
28149 const arm_feature_set clear_value
;
28150 /* List of architectures for which an extension is available. ARM_ARCH_NONE
28151 indicates that an extension is available for all architectures while
28152 ARM_ANY marks an empty entry. */
28153 const arm_feature_set allowed_archs
[2];
28156 /* The following table must be in alphabetical order with a NULL last entry. */
28158 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
28159 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
28161 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
28162 use the context sensitive approach using arm_ext_table's. */
28163 static const struct arm_option_extension_value_table arm_extensions
[] =
28165 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28166 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28167 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28168 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
28169 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28170 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
,
28171 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
),
28173 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28174 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28175 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
28176 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
28177 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28178 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28179 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28181 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28182 | ARM_EXT2_FP16_FML
),
28183 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28184 | ARM_EXT2_FP16_FML
),
28186 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28187 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28188 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
28189 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
28190 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
28191 Thumb divide instruction. Due to this having the same name as the
28192 previous entry, this will be ignored when doing command-line parsing and
28193 only considered by build attribute selection code. */
28194 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
28195 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
28196 ARM_FEATURE_CORE_LOW (ARM_EXT_V7
)),
28197 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
28198 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
28199 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
28200 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
28201 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
28202 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
28203 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
28204 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
28205 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
28206 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
28207 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
28208 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
28209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
28210 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
28211 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
28212 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28213 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
28214 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
28216 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
28217 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
28218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28219 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
28220 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
28221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28222 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
28223 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
28225 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28226 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28227 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
28228 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
28229 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
28230 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
28231 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28232 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
28234 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
28235 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
28236 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
28237 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
28238 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
28242 /* ISA floating-point and Advanced SIMD extensions. */
28243 struct arm_option_fpu_value_table
28246 const arm_feature_set value
;
28249 /* This list should, at a minimum, contain all the fpu names
28250 recognized by GCC. */
28251 static const struct arm_option_fpu_value_table arm_fpus
[] =
28253 {"softfpa", FPU_NONE
},
28254 {"fpe", FPU_ARCH_FPE
},
28255 {"fpe2", FPU_ARCH_FPE
},
28256 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
28257 {"fpa", FPU_ARCH_FPA
},
28258 {"fpa10", FPU_ARCH_FPA
},
28259 {"fpa11", FPU_ARCH_FPA
},
28260 {"arm7500fe", FPU_ARCH_FPA
},
28261 {"softvfp", FPU_ARCH_VFP
},
28262 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
28263 {"vfp", FPU_ARCH_VFP_V2
},
28264 {"vfp9", FPU_ARCH_VFP_V2
},
28265 {"vfp3", FPU_ARCH_VFP_V3
}, /* Undocumented, use vfpv3. */
28266 {"vfp10", FPU_ARCH_VFP_V2
},
28267 {"vfp10-r0", FPU_ARCH_VFP_V1
},
28268 {"vfpxd", FPU_ARCH_VFP_V1xD
},
28269 {"vfpv2", FPU_ARCH_VFP_V2
},
28270 {"vfpv3", FPU_ARCH_VFP_V3
},
28271 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
28272 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
28273 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
28274 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
28275 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
28276 {"arm1020t", FPU_ARCH_VFP_V1
},
28277 {"arm1020e", FPU_ARCH_VFP_V2
},
28278 {"arm1136jfs", FPU_ARCH_VFP_V2
}, /* Undocumented, use arm1136jf-s. */
28279 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
28280 {"maverick", FPU_ARCH_MAVERICK
},
28281 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
28282 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
28283 {"neon-fp16", FPU_ARCH_NEON_FP16
},
28284 {"vfpv4", FPU_ARCH_VFP_V4
},
28285 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
28286 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
28287 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
28288 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
28289 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
28290 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
28291 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
28292 {"crypto-neon-fp-armv8",
28293 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
28294 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
28295 {"crypto-neon-fp-armv8.1",
28296 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
28297 {NULL
, ARM_ARCH_NONE
}
28300 struct arm_option_value_table
28306 static const struct arm_option_value_table arm_float_abis
[] =
28308 {"hard", ARM_FLOAT_ABI_HARD
},
28309 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
28310 {"soft", ARM_FLOAT_ABI_SOFT
},
28315 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
28316 static const struct arm_option_value_table arm_eabis
[] =
28318 {"gnu", EF_ARM_EABI_UNKNOWN
},
28319 {"4", EF_ARM_EABI_VER4
},
28320 {"5", EF_ARM_EABI_VER5
},
28325 struct arm_long_option_table
28327 const char * option
; /* Substring to match. */
28328 const char * help
; /* Help information. */
28329 int (* func
) (const char * subopt
); /* Function to decode sub-option. */
28330 const char * deprecated
; /* If non-null, print this message. */
28334 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
28335 arm_feature_set
*ext_set
,
28336 const struct arm_ext_table
*ext_table
)
28338 /* We insist on extensions being specified in alphabetical order, and with
28339 extensions being added before being removed. We achieve this by having
28340 the global ARM_EXTENSIONS table in alphabetical order, and using the
28341 ADDING_VALUE variable to indicate whether we are adding an extension (1)
28342 or removing it (0) and only allowing it to change in the order
28344 const struct arm_option_extension_value_table
* opt
= NULL
;
28345 const arm_feature_set arm_any
= ARM_ANY
;
28346 int adding_value
= -1;
28348 while (str
!= NULL
&& *str
!= 0)
28355 as_bad (_("invalid architectural extension"));
28360 ext
= strchr (str
, '+');
28365 len
= strlen (str
);
28367 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
28369 if (adding_value
!= 0)
28372 opt
= arm_extensions
;
28380 if (adding_value
== -1)
28383 opt
= arm_extensions
;
28385 else if (adding_value
!= 1)
28387 as_bad (_("must specify extensions to add before specifying "
28388 "those to remove"));
28395 as_bad (_("missing architectural extension"));
28399 gas_assert (adding_value
!= -1);
28400 gas_assert (opt
!= NULL
);
28402 if (ext_table
!= NULL
)
28404 const struct arm_ext_table
* ext_opt
= ext_table
;
28405 bfd_boolean found
= FALSE
;
28406 for (; ext_opt
->name
!= NULL
; ext_opt
++)
28407 if (ext_opt
->name_len
== len
28408 && strncmp (ext_opt
->name
, str
, len
) == 0)
28412 if (ARM_FEATURE_ZERO (ext_opt
->merge
))
28413 /* TODO: Option not supported. When we remove the
28414 legacy table this case should error out. */
28417 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, ext_opt
->merge
);
28421 if (ARM_FEATURE_ZERO (ext_opt
->clear
))
28422 /* TODO: Option not supported. When we remove the
28423 legacy table this case should error out. */
28425 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, ext_opt
->clear
);
28437 /* Scan over the options table trying to find an exact match. */
28438 for (; opt
->name
!= NULL
; opt
++)
28439 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28441 int i
, nb_allowed_archs
=
28442 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
28443 /* Check we can apply the extension to this architecture. */
28444 for (i
= 0; i
< nb_allowed_archs
; i
++)
28447 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
28449 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
28452 if (i
== nb_allowed_archs
)
28454 as_bad (_("extension does not apply to the base architecture"));
28458 /* Add or remove the extension. */
28460 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
28462 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
28464 /* Allowing Thumb division instructions for ARMv7 in autodetection
28465 rely on this break so that duplicate extensions (extensions
28466 with the same name as a previous extension in the list) are not
28467 considered for command-line parsing. */
28471 if (opt
->name
== NULL
)
28473 /* Did we fail to find an extension because it wasn't specified in
28474 alphabetical order, or because it does not exist? */
28476 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28477 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28480 if (opt
->name
== NULL
)
28481 as_bad (_("unknown architectural extension `%s'"), str
);
28483 as_bad (_("architectural extensions must be specified in "
28484 "alphabetical order"));
28490 /* We should skip the extension we've just matched the next time
28502 arm_parse_cpu (const char *str
)
28504 const struct arm_cpu_option_table
*opt
;
28505 const char *ext
= strchr (str
, '+');
28511 len
= strlen (str
);
28515 as_bad (_("missing cpu name `%s'"), str
);
28519 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
28520 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28522 mcpu_cpu_opt
= &opt
->value
;
28523 if (mcpu_ext_opt
== NULL
)
28524 mcpu_ext_opt
= XNEW (arm_feature_set
);
28525 *mcpu_ext_opt
= opt
->ext
;
28526 mcpu_fpu_opt
= &opt
->default_fpu
;
28527 if (opt
->canonical_name
)
28529 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
28530 strcpy (selected_cpu_name
, opt
->canonical_name
);
28536 if (len
>= sizeof selected_cpu_name
)
28537 len
= (sizeof selected_cpu_name
) - 1;
28539 for (i
= 0; i
< len
; i
++)
28540 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
28541 selected_cpu_name
[i
] = 0;
28545 return arm_parse_extension (ext
, mcpu_cpu_opt
, mcpu_ext_opt
, NULL
);
28550 as_bad (_("unknown cpu `%s'"), str
);
28555 arm_parse_arch (const char *str
)
28557 const struct arm_arch_option_table
*opt
;
28558 const char *ext
= strchr (str
, '+');
28564 len
= strlen (str
);
28568 as_bad (_("missing architecture name `%s'"), str
);
28572 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
28573 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28575 march_cpu_opt
= &opt
->value
;
28576 if (march_ext_opt
== NULL
)
28577 march_ext_opt
= XNEW (arm_feature_set
);
28578 *march_ext_opt
= arm_arch_none
;
28579 march_fpu_opt
= &opt
->default_fpu
;
28580 strcpy (selected_cpu_name
, opt
->name
);
28583 return arm_parse_extension (ext
, march_cpu_opt
, march_ext_opt
,
28589 as_bad (_("unknown architecture `%s'\n"), str
);
28594 arm_parse_fpu (const char * str
)
28596 const struct arm_option_fpu_value_table
* opt
;
28598 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
28599 if (streq (opt
->name
, str
))
28601 mfpu_opt
= &opt
->value
;
28605 as_bad (_("unknown floating point format `%s'\n"), str
);
28610 arm_parse_float_abi (const char * str
)
28612 const struct arm_option_value_table
* opt
;
28614 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
28615 if (streq (opt
->name
, str
))
28617 mfloat_abi_opt
= opt
->value
;
28621 as_bad (_("unknown floating point abi `%s'\n"), str
);
28627 arm_parse_eabi (const char * str
)
28629 const struct arm_option_value_table
*opt
;
28631 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
28632 if (streq (opt
->name
, str
))
28634 meabi_flags
= opt
->value
;
28637 as_bad (_("unknown EABI `%s'\n"), str
);
28643 arm_parse_it_mode (const char * str
)
28645 bfd_boolean ret
= TRUE
;
28647 if (streq ("arm", str
))
28648 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
28649 else if (streq ("thumb", str
))
28650 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
28651 else if (streq ("always", str
))
28652 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
28653 else if (streq ("never", str
))
28654 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
28657 as_bad (_("unknown implicit IT mode `%s', should be "\
28658 "arm, thumb, always, or never."), str
);
28666 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
28668 codecomposer_syntax
= TRUE
;
28669 arm_comment_chars
[0] = ';';
28670 arm_line_separator_chars
[0] = 0;
28674 struct arm_long_option_table arm_long_opts
[] =
28676 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
28677 arm_parse_cpu
, NULL
},
28678 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
28679 arm_parse_arch
, NULL
},
28680 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
28681 arm_parse_fpu
, NULL
},
28682 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
28683 arm_parse_float_abi
, NULL
},
28685 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
28686 arm_parse_eabi
, NULL
},
28688 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
28689 arm_parse_it_mode
, NULL
},
28690 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
28691 arm_ccs_mode
, NULL
},
28692 {NULL
, NULL
, 0, NULL
}
28696 md_parse_option (int c
, const char * arg
)
28698 struct arm_option_table
*opt
;
28699 const struct arm_legacy_option_table
*fopt
;
28700 struct arm_long_option_table
*lopt
;
28706 target_big_endian
= 1;
28712 target_big_endian
= 0;
28716 case OPTION_FIX_V4BX
:
28724 #endif /* OBJ_ELF */
28727 /* Listing option. Just ignore these, we don't support additional
28732 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
28734 if (c
== opt
->option
[0]
28735 && ((arg
== NULL
&& opt
->option
[1] == 0)
28736 || streq (arg
, opt
->option
+ 1)))
28738 /* If the option is deprecated, tell the user. */
28739 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
28740 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
28741 arg
? arg
: "", _(opt
->deprecated
));
28743 if (opt
->var
!= NULL
)
28744 *opt
->var
= opt
->value
;
28750 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
28752 if (c
== fopt
->option
[0]
28753 && ((arg
== NULL
&& fopt
->option
[1] == 0)
28754 || streq (arg
, fopt
->option
+ 1)))
28756 /* If the option is deprecated, tell the user. */
28757 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
28758 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
28759 arg
? arg
: "", _(fopt
->deprecated
));
28761 if (fopt
->var
!= NULL
)
28762 *fopt
->var
= &fopt
->value
;
28768 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
28770 /* These options are expected to have an argument. */
28771 if (c
== lopt
->option
[0]
28773 && strncmp (arg
, lopt
->option
+ 1,
28774 strlen (lopt
->option
+ 1)) == 0)
28776 /* If the option is deprecated, tell the user. */
28777 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
28778 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
28779 _(lopt
->deprecated
));
28781 /* Call the sup-option parser. */
28782 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
28793 md_show_usage (FILE * fp
)
28795 struct arm_option_table
*opt
;
28796 struct arm_long_option_table
*lopt
;
28798 fprintf (fp
, _(" ARM-specific assembler options:\n"));
28800 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
28801 if (opt
->help
!= NULL
)
28802 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
28804 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
28805 if (lopt
->help
!= NULL
)
28806 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
28810 -EB assemble code for a big-endian cpu\n"));
28815 -EL assemble code for a little-endian cpu\n"));
28819 --fix-v4bx Allow BX in ARMv4 code\n"));
28823 --fdpic generate an FDPIC object file\n"));
28824 #endif /* OBJ_ELF */
28832 arm_feature_set flags
;
28833 } cpu_arch_ver_table
;
28835 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
28836 chronologically for architectures, with an exception for ARMv6-M and
28837 ARMv6S-M due to legacy reasons. No new architecture should have a
28838 special case. This allows for build attribute selection results to be
28839 stable when new architectures are added. */
28840 static const cpu_arch_ver_table cpu_arch_ver
[] =
28842 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V1
},
28843 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2
},
28844 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2S
},
28845 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3
},
28846 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3M
},
28847 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4xM
},
28848 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4
},
28849 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4TxM
},
28850 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4T
},
28851 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5xM
},
28852 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5
},
28853 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5TxM
},
28854 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5T
},
28855 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TExP
},
28856 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TE
},
28857 {TAG_CPU_ARCH_V5TEJ
, ARM_ARCH_V5TEJ
},
28858 {TAG_CPU_ARCH_V6
, ARM_ARCH_V6
},
28859 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6Z
},
28860 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6KZ
},
28861 {TAG_CPU_ARCH_V6K
, ARM_ARCH_V6K
},
28862 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6T2
},
28863 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KT2
},
28864 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6ZT2
},
28865 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KZT2
},
28867 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
28868 always selected build attributes to match those of ARMv6-M
28869 (resp. ARMv6S-M). However, due to these architectures being a strict
28870 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
28871 would be selected when fully respecting chronology of architectures.
28872 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
28873 move them before ARMv7 architectures. */
28874 {TAG_CPU_ARCH_V6_M
, ARM_ARCH_V6M
},
28875 {TAG_CPU_ARCH_V6S_M
, ARM_ARCH_V6SM
},
28877 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7
},
28878 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7A
},
28879 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7R
},
28880 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7M
},
28881 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7VE
},
28882 {TAG_CPU_ARCH_V7E_M
, ARM_ARCH_V7EM
},
28883 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8A
},
28884 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_1A
},
28885 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_2A
},
28886 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_3A
},
28887 {TAG_CPU_ARCH_V8M_BASE
, ARM_ARCH_V8M_BASE
},
28888 {TAG_CPU_ARCH_V8M_MAIN
, ARM_ARCH_V8M_MAIN
},
28889 {TAG_CPU_ARCH_V8R
, ARM_ARCH_V8R
},
28890 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_4A
},
28891 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_5A
},
28892 {TAG_CPU_ARCH_V8_1M_MAIN
, ARM_ARCH_V8_1M_MAIN
},
28893 {-1, ARM_ARCH_NONE
}
28896 /* Set an attribute if it has not already been set by the user. */
28899 aeabi_set_attribute_int (int tag
, int value
)
28902 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
28903 || !attributes_set_explicitly
[tag
])
28904 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
28908 aeabi_set_attribute_string (int tag
, const char *value
)
28911 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
28912 || !attributes_set_explicitly
[tag
])
28913 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
28916 /* Return whether features in the *NEEDED feature set are available via
28917 extensions for the architecture whose feature set is *ARCH_FSET. */
28920 have_ext_for_needed_feat_p (const arm_feature_set
*arch_fset
,
28921 const arm_feature_set
*needed
)
28923 int i
, nb_allowed_archs
;
28924 arm_feature_set ext_fset
;
28925 const struct arm_option_extension_value_table
*opt
;
28927 ext_fset
= arm_arch_none
;
28928 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28930 /* Extension does not provide any feature we need. */
28931 if (!ARM_CPU_HAS_FEATURE (*needed
, opt
->merge_value
))
28935 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
28936 for (i
= 0; i
< nb_allowed_archs
; i
++)
28939 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_arch_any
))
28942 /* Extension is available, add it. */
28943 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *arch_fset
))
28944 ARM_MERGE_FEATURE_SETS (ext_fset
, ext_fset
, opt
->merge_value
);
28948 /* Can we enable all features in *needed? */
28949 return ARM_FSET_CPU_SUBSET (*needed
, ext_fset
);
28952 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
28953 a given architecture feature set *ARCH_EXT_FSET including extension feature
28954 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
28955 - if true, check for an exact match of the architecture modulo extensions;
28956 - otherwise, select build attribute value of the first superset
28957 architecture released so that results remains stable when new architectures
28959 For -march/-mcpu=all the build attribute value of the most featureful
28960 architecture is returned. Tag_CPU_arch_profile result is returned in
28964 get_aeabi_cpu_arch_from_fset (const arm_feature_set
*arch_ext_fset
,
28965 const arm_feature_set
*ext_fset
,
28966 char *profile
, int exact_match
)
28968 arm_feature_set arch_fset
;
28969 const cpu_arch_ver_table
*p_ver
, *p_ver_ret
= NULL
;
28971 /* Select most featureful architecture with all its extensions if building
28972 for -march=all as the feature sets used to set build attributes. */
28973 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, arm_arch_any
))
28975 /* Force revisiting of decision for each new architecture. */
28976 gas_assert (MAX_TAG_CPU_ARCH
<= TAG_CPU_ARCH_V8_1M_MAIN
);
28978 return TAG_CPU_ARCH_V8
;
28981 ARM_CLEAR_FEATURE (arch_fset
, *arch_ext_fset
, *ext_fset
);
28983 for (p_ver
= cpu_arch_ver
; p_ver
->val
!= -1; p_ver
++)
28985 arm_feature_set known_arch_fset
;
28987 ARM_CLEAR_FEATURE (known_arch_fset
, p_ver
->flags
, fpu_any
);
28990 /* Base architecture match user-specified architecture and
28991 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
28992 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, known_arch_fset
))
28997 /* Base architecture match user-specified architecture only
28998 (eg. ARMv6-M in the same case as above). Record it in case we
28999 find a match with above condition. */
29000 else if (p_ver_ret
== NULL
29001 && ARM_FEATURE_EQUAL (arch_fset
, known_arch_fset
))
29007 /* Architecture has all features wanted. */
29008 if (ARM_FSET_CPU_SUBSET (arch_fset
, known_arch_fset
))
29010 arm_feature_set added_fset
;
29012 /* Compute features added by this architecture over the one
29013 recorded in p_ver_ret. */
29014 if (p_ver_ret
!= NULL
)
29015 ARM_CLEAR_FEATURE (added_fset
, known_arch_fset
,
29017 /* First architecture that match incl. with extensions, or the
29018 only difference in features over the recorded match is
29019 features that were optional and are now mandatory. */
29020 if (p_ver_ret
== NULL
29021 || ARM_FSET_CPU_SUBSET (added_fset
, arch_fset
))
29027 else if (p_ver_ret
== NULL
)
29029 arm_feature_set needed_ext_fset
;
29031 ARM_CLEAR_FEATURE (needed_ext_fset
, arch_fset
, known_arch_fset
);
29033 /* Architecture has all features needed when using some
29034 extensions. Record it and continue searching in case there
29035 exist an architecture providing all needed features without
29036 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
29038 if (have_ext_for_needed_feat_p (&known_arch_fset
,
29045 if (p_ver_ret
== NULL
)
29049 /* Tag_CPU_arch_profile. */
29050 if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7a
)
29051 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8
)
29052 || (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_atomics
)
29053 && !ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8m_m_only
)))
29055 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7r
))
29057 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_m
))
29061 return p_ver_ret
->val
;
29064 /* Set the public EABI object attributes. */
29067 aeabi_set_public_attributes (void)
/* NOTE(review): Emits the public ARM EABI build attributes (Tag_CPU_arch,
   Tag_ARM_ISA_use, Tag_THUMB_ISA_use, Tag_VFP_arch, Tag_DIV_use, ...) from
   the selected or auto-detected feature sets.  This chunk is extraction
   garbled: statements are split across lines, the original source line
   numbers are fused into the text, and several lines (braces, the
   declarations of arch/q/i/thumb_isa_use/virt_sec, some else branches) are
   missing from this view.  Only comments are added here; TODO restore the
   missing lines from upstream tc-arm.c before compiling.  */
29069 char profile
= '\0';
29072 int fp16_optional
= 0;
29073 int skip_exact_match
= 0;
29074 arm_feature_set flags
, flags_arch
, flags_ext
;
29076 /* Autodetection mode, choose the architecture based the instructions
29078 if (no_cpu_selected ())
/* Autodetection: merge the features actually used by ARM and Thumb code.  */
29080 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
29082 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
29083 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
29085 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
29086 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
29088 /* Code run during relaxation relies on selected_cpu being set. */
29089 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
29090 flags_ext
= arm_arch_none
;
29091 ARM_CLEAR_FEATURE (selected_arch
, flags_arch
, flags_ext
);
29092 selected_ext
= flags_ext
;
29093 selected_cpu
= flags
;
29095 /* Otherwise, choose the architecture based on the capabilities of the
29099 ARM_MERGE_FEATURE_SETS (flags_arch
, selected_arch
, selected_ext
);
29100 ARM_CLEAR_FEATURE (flags_arch
, flags_arch
, fpu_any
);
29101 flags_ext
= selected_ext
;
29102 flags
= selected_cpu
;
29104 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_fpu
);
29106 /* Allow the user to override the reported architecture. */
29107 if (!ARM_FEATURE_ZERO (selected_object_arch
))
29109 ARM_CLEAR_FEATURE (flags_arch
, selected_object_arch
, fpu_any
);
29110 flags_ext
= arm_arch_none
;
29113 skip_exact_match
= ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_any
);
29115 /* When this function is run again after relaxation has happened there is no
29116 way to determine whether an architecture or CPU was specified by the user:
29117 - selected_cpu is set above for relaxation to work;
29118 - march_cpu_opt is not set if only -mcpu or .cpu is used;
29119 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
29120 Therefore, if not in -march=all case we first try an exact match and fall
29121 back to autodetection. */
29122 if (!skip_exact_match
)
29123 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 1);
29125 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 0);
29127 as_bad (_("no architecture contains all the instructions used\n"));
29129 /* Tag_CPU_name. */
29130 if (selected_cpu_name
[0])
/* NOTE(review): `q' is presumably a char * alias of selected_cpu_name; its
   declaration falls in a gap of this extraction -- confirm upstream.  */
29134 q
= selected_cpu_name
;
29135 if (strncmp (q
, "armv", 4) == 0)
29140 for (i
= 0; q
[i
]; i
++)
29141 q
[i
] = TOUPPER (q
[i
]);
29143 aeabi_set_attribute_string (Tag_CPU_name
, q
);
29146 /* Tag_CPU_arch. */
29147 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
29149 /* Tag_CPU_arch_profile. */
29150 if (profile
!= '\0')
29151 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
29153 /* Tag_DSP_extension. */
29154 if (ARM_CPU_HAS_FEATURE (selected_ext
, arm_ext_dsp
))
29155 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
29157 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
29158 /* Tag_ARM_ISA_use. */
29159 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
29160 || ARM_FEATURE_ZERO (flags_arch
))
29161 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
29163 /* Tag_THUMB_ISA_use. */
29164 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
29165 || ARM_FEATURE_ZERO (flags_arch
))
/* NOTE(review): the declaration of thumb_isa_use and the branch assigning
   its values fall in gaps here -- confirm values against upstream.  */
29169 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
29170 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
29172 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
29176 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
29179 /* Tag_VFP_arch. */
29180 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
29181 aeabi_set_attribute_int (Tag_VFP_arch
,
29182 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
29184 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
29185 aeabi_set_attribute_int (Tag_VFP_arch
,
29186 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
29188 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
29191 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
29193 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
29195 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
29198 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
29199 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
29200 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
29201 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
29202 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
29204 /* Tag_ABI_HardFP_use. */
29205 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
29206 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
29207 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
29209 /* Tag_WMMX_arch. */
29210 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
29211 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
29212 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
29213 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
29215 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
29216 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
29217 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
29218 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
29219 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
29220 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
29222 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
29224 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
29228 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
29233 if (ARM_CPU_HAS_FEATURE (flags
, mve_fp_ext
))
29234 aeabi_set_attribute_int (Tag_MVE_arch
, 2);
29235 else if (ARM_CPU_HAS_FEATURE (flags
, mve_ext
))
29236 aeabi_set_attribute_int (Tag_MVE_arch
, 1);
29238 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
29239 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
29240 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
29244 We set Tag_DIV_use to two when integer divide instructions have been used
29245 in ARM state, or when Thumb integer divide instructions have been used,
29246 but we have no architecture profile set, nor have we any ARM instructions.
29248 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
29249 by the base architecture.
29251 For new architectures we will have to check these tests. */
/* NOTE(review): `arch' is presumably the Tag_CPU_arch value computed by
   get_aeabi_cpu_arch_from_fset above; its declaration is not visible in
   this garbled view -- confirm against upstream.  */
29252 gas_assert (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
29253 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
29254 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
29255 aeabi_set_attribute_int (Tag_DIV_use
, 0);
29256 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
29257 || (profile
== '\0'
29258 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
29259 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
29260 aeabi_set_attribute_int (Tag_DIV_use
, 2);
29262 /* Tag_MP_extension_use. */
29263 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
29264 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
29266 /* Tag Virtualization_use. */
29267 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
/* NOTE(review): the assignments to virt_sec (and its declaration) fall in
   gaps of this extraction -- confirm upstream.  */
29269 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
29272 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
29275 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
29276 finished and free extension feature bits which will not be used anymore. */
29279 arm_md_post_relax (void)
29281 aeabi_set_public_attributes ();
29282 XDELETE (mcpu_ext_opt
);
29283 mcpu_ext_opt
= NULL
;
29284 XDELETE (march_ext_opt
);
29285 march_ext_opt
= NULL
;
29288 /* Add the default contents for the .ARM.attributes section. */
/* NOTE(review): the function header is missing from this garbled view
   (presumably `void arm_md_end (void)' -- confirm against upstream).
   Objects using an EABI version older than VER4 get no attributes;
   otherwise the public attributes are (re)computed.  */
29293 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
29296 aeabi_set_public_attributes ();
29298 #endif /* OBJ_ELF */
29300 /* Parse a .cpu directive. */
29303 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
29305 const struct arm_cpu_option_table
*opt
;
29309 name
= input_line_pointer
;
29310 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29311 input_line_pointer
++;
29312 saved_char
= *input_line_pointer
;
29313 *input_line_pointer
= 0;
29315 /* Skip the first "all" entry. */
29316 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
29317 if (streq (opt
->name
, name
))
29319 selected_arch
= opt
->value
;
29320 selected_ext
= opt
->ext
;
29321 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
29322 if (opt
->canonical_name
)
29323 strcpy (selected_cpu_name
, opt
->canonical_name
);
29327 for (i
= 0; opt
->name
[i
]; i
++)
29328 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
29330 selected_cpu_name
[i
] = 0;
29332 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29334 *input_line_pointer
= saved_char
;
29335 demand_empty_rest_of_line ();
29338 as_bad (_("unknown cpu `%s'"), name
);
29339 *input_line_pointer
= saved_char
;
29340 ignore_rest_of_line ();
29343 /* Parse a .arch directive. */
29346 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
29348 const struct arm_arch_option_table
*opt
;
29352 name
= input_line_pointer
;
29353 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29354 input_line_pointer
++;
29355 saved_char
= *input_line_pointer
;
29356 *input_line_pointer
= 0;
29358 /* Skip the first "all" entry. */
29359 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
29360 if (streq (opt
->name
, name
))
29362 selected_arch
= opt
->value
;
29363 selected_ext
= arm_arch_none
;
29364 selected_cpu
= selected_arch
;
29365 strcpy (selected_cpu_name
, opt
->name
);
29366 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29367 *input_line_pointer
= saved_char
;
29368 demand_empty_rest_of_line ();
29372 as_bad (_("unknown architecture `%s'\n"), name
);
29373 *input_line_pointer
= saved_char
;
29374 ignore_rest_of_line ();
29377 /* Parse a .object_arch directive. */
29380 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
29382 const struct arm_arch_option_table
*opt
;
29386 name
= input_line_pointer
;
29387 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29388 input_line_pointer
++;
29389 saved_char
= *input_line_pointer
;
29390 *input_line_pointer
= 0;
29392 /* Skip the first "all" entry. */
29393 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
29394 if (streq (opt
->name
, name
))
29396 selected_object_arch
= opt
->value
;
29397 *input_line_pointer
= saved_char
;
29398 demand_empty_rest_of_line ();
29402 as_bad (_("unknown architecture `%s'\n"), name
);
29403 *input_line_pointer
= saved_char
;
29404 ignore_rest_of_line ();
29407 /* Parse a .arch_extension directive. */
29410 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
29412 const struct arm_option_extension_value_table
*opt
;
29415 int adding_value
= 1;
29417 name
= input_line_pointer
;
29418 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29419 input_line_pointer
++;
29420 saved_char
= *input_line_pointer
;
29421 *input_line_pointer
= 0;
29423 if (strlen (name
) >= 2
29424 && strncmp (name
, "no", 2) == 0)
29430 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
29431 if (streq (opt
->name
, name
))
29433 int i
, nb_allowed_archs
=
29434 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
29435 for (i
= 0; i
< nb_allowed_archs
; i
++)
29438 if (ARM_CPU_IS_ANY (opt
->allowed_archs
[i
]))
29440 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], selected_arch
))
29444 if (i
== nb_allowed_archs
)
29446 as_bad (_("architectural extension `%s' is not allowed for the "
29447 "current base architecture"), name
);
29452 ARM_MERGE_FEATURE_SETS (selected_ext
, selected_ext
,
29455 ARM_CLEAR_FEATURE (selected_ext
, selected_ext
, opt
->clear_value
);
29457 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
29458 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29459 *input_line_pointer
= saved_char
;
29460 demand_empty_rest_of_line ();
29461 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
29462 on this return so that duplicate extensions (extensions with the
29463 same name as a previous extension in the list) are not considered
29464 for command-line parsing. */
29468 if (opt
->name
== NULL
)
29469 as_bad (_("unknown architecture extension `%s'\n"), name
);
29471 *input_line_pointer
= saved_char
;
29472 ignore_rest_of_line ();
29475 /* Parse a .fpu directive. */
29478 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
29480 const struct arm_option_fpu_value_table
*opt
;
29484 name
= input_line_pointer
;
29485 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29486 input_line_pointer
++;
29487 saved_char
= *input_line_pointer
;
29488 *input_line_pointer
= 0;
29490 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
29491 if (streq (opt
->name
, name
))
29493 selected_fpu
= opt
->value
;
29494 #ifndef CPU_DEFAULT
29495 if (no_cpu_selected ())
29496 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
29499 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29500 *input_line_pointer
= saved_char
;
29501 demand_empty_rest_of_line ();
29505 as_bad (_("unknown floating point format `%s'\n"), name
);
29506 *input_line_pointer
= saved_char
;
29507 ignore_rest_of_line ();
29510 /* Copy symbol information. */
29513 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
29515 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
29519 /* Given a symbolic attribute NAME, return the proper integer value.
29520 Returns -1 if the attribute is not known. */
29523 arm_convert_symbolic_attribute (const char *name
)
/* NOTE(review): this chunk is extraction-garbled.  The return type, braces,
   the struct member declarations of the lookup table, some table entries
   (presumably e.g. Tag_CPU_name, Tag_CPU_arch, Tag_FP_arch, Tag_WMMX_arch,
   Tag_DIV_use fall in the numbering gaps), the loop index declaration, the
   NULL-name guard and the final `return -1;' are missing from this view.
   Only comments are added here; TODO restore from upstream tc-arm.c.  */
29525 static const struct
29530 attribute_table
[] =
29532 /* When you modify this table you should
29533 also modify the list in doc/c-arm.texi. */
29534 #define T(tag) {#tag, tag}
29535 T (Tag_CPU_raw_name
),
29538 T (Tag_CPU_arch_profile
),
29539 T (Tag_ARM_ISA_use
),
29540 T (Tag_THUMB_ISA_use
),
29544 T (Tag_Advanced_SIMD_arch
),
29545 T (Tag_PCS_config
),
29546 T (Tag_ABI_PCS_R9_use
),
29547 T (Tag_ABI_PCS_RW_data
),
29548 T (Tag_ABI_PCS_RO_data
),
29549 T (Tag_ABI_PCS_GOT_use
),
29550 T (Tag_ABI_PCS_wchar_t
),
29551 T (Tag_ABI_FP_rounding
),
29552 T (Tag_ABI_FP_denormal
),
29553 T (Tag_ABI_FP_exceptions
),
29554 T (Tag_ABI_FP_user_exceptions
),
29555 T (Tag_ABI_FP_number_model
),
29556 T (Tag_ABI_align_needed
),
29557 T (Tag_ABI_align8_needed
),
29558 T (Tag_ABI_align_preserved
),
29559 T (Tag_ABI_align8_preserved
),
29560 T (Tag_ABI_enum_size
),
29561 T (Tag_ABI_HardFP_use
),
29562 T (Tag_ABI_VFP_args
),
29563 T (Tag_ABI_WMMX_args
),
29564 T (Tag_ABI_optimization_goals
),
29565 T (Tag_ABI_FP_optimization_goals
),
29566 T (Tag_compatibility
),
29567 T (Tag_CPU_unaligned_access
),
29568 T (Tag_FP_HP_extension
),
29569 T (Tag_VFP_HP_extension
),
29570 T (Tag_ABI_FP_16bit_format
),
29571 T (Tag_MPextension_use
),
29573 T (Tag_nodefaults
),
29574 T (Tag_also_compatible_with
),
29575 T (Tag_conformance
),
29577 T (Tag_Virtualization_use
),
29578 T (Tag_DSP_extension
),
29580 /* We deliberately do not include Tag_MPextension_use_legacy. */
/* Linear scan of the table; returns the tag value on a name match.  */
29588 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
29589 if (streq (name
, attribute_table
[i
].name
))
29590 return attribute_table
[i
].tag
;
29595 /* Apply sym value for relocations only in the case that they are for
29596 local symbols in the same segment as the fixup and you have the
29597 respective architectural feature for blx and simple switches. */
29600 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
29603 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
29604 /* PR 17444: If the local symbol is in a different section then a reloc
29605 will always be generated for it, so applying the symbol value now
29606 will result in a double offset being stored in the relocation. */
29607 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
29608 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
29610 switch (fixP
->fx_r_type
)
29612 case BFD_RELOC_ARM_PCREL_BLX
:
29613 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
29614 if (ARM_IS_FUNC (fixP
->fx_addsy
))
29618 case BFD_RELOC_ARM_PCREL_CALL
:
29619 case BFD_RELOC_THUMB_PCREL_BLX
:
29620 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
29631 #endif /* OBJ_ELF */