1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS
,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result
;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
124 #define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant
;
127 static arm_feature_set arm_arch_used
;
128 static arm_feature_set thumb_arch_used
;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26
= FALSE
;
132 static int atpcs
= FALSE
;
133 static int support_interwork
= FALSE
;
134 static int uses_apcs_float
= FALSE
;
135 static int pic_code
= FALSE
;
136 static int fix_v4bx
= FALSE
;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated
= TRUE
;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
154 static const arm_feature_set
*object_arch
= NULL
;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
158 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
159 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
160 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
161 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
162 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
163 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
165 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
167 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
170 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
173 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
174 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
175 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
176 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
177 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
178 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
179 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
180 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
181 static const arm_feature_set arm_ext_v4t_5
=
182 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
183 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
184 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
185 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
186 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
187 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
188 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
189 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
190 static const arm_feature_set arm_ext_v6m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
);
191 static const arm_feature_set arm_ext_v6_notm
=
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
193 static const arm_feature_set arm_ext_v6_dsp
=
194 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
195 static const arm_feature_set arm_ext_barrier
=
196 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
197 static const arm_feature_set arm_ext_msr
=
198 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
199 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
200 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
201 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
202 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
204 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
206 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
207 static const arm_feature_set arm_ext_m
=
208 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_OS
| ARM_EXT_V7M
,
209 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
210 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
211 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
212 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
213 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
214 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
215 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
216 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
217 static const arm_feature_set arm_ext_v8m_main
=
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
219 /* Instructions in ARMv8-M only found in M profile architectures. */
220 static const arm_feature_set arm_ext_v8m_m_only
=
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
222 static const arm_feature_set arm_ext_v6t2_v8m
=
223 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
224 /* Instructions shared between ARMv8-A and ARMv8-M. */
225 static const arm_feature_set arm_ext_atomics
=
226 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
228 /* DSP instructions Tag_DSP_extension refers to. */
229 static const arm_feature_set arm_ext_dsp
=
230 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
232 static const arm_feature_set arm_ext_ras
=
233 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
234 /* FP16 instructions. */
235 static const arm_feature_set arm_ext_fp16
=
236 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
238 static const arm_feature_set arm_arch_any
= ARM_ANY
;
239 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
240 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
241 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
243 static const arm_feature_set arm_arch_v6m_only
= ARM_ARCH_V6M_ONLY
;
246 static const arm_feature_set arm_cext_iwmmxt2
=
247 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
248 static const arm_feature_set arm_cext_iwmmxt
=
249 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
250 static const arm_feature_set arm_cext_xscale
=
251 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
252 static const arm_feature_set arm_cext_maverick
=
253 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
254 static const arm_feature_set fpu_fpa_ext_v1
=
255 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
256 static const arm_feature_set fpu_fpa_ext_v2
=
257 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
258 static const arm_feature_set fpu_vfp_ext_v1xd
=
259 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
260 static const arm_feature_set fpu_vfp_ext_v1
=
261 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
262 static const arm_feature_set fpu_vfp_ext_v2
=
263 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
264 static const arm_feature_set fpu_vfp_ext_v3xd
=
265 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
266 static const arm_feature_set fpu_vfp_ext_v3
=
267 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
268 static const arm_feature_set fpu_vfp_ext_d32
=
269 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
270 static const arm_feature_set fpu_neon_ext_v1
=
271 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
272 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
273 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
275 static const arm_feature_set fpu_vfp_fp16
=
276 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
277 static const arm_feature_set fpu_neon_ext_fma
=
278 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
280 static const arm_feature_set fpu_vfp_ext_fma
=
281 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
282 static const arm_feature_set fpu_vfp_ext_armv8
=
283 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
284 static const arm_feature_set fpu_vfp_ext_armv8xd
=
285 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
286 static const arm_feature_set fpu_neon_ext_armv8
=
287 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
288 static const arm_feature_set fpu_crypto_ext_armv8
=
289 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
290 static const arm_feature_set crc_ext_armv8
=
291 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
292 static const arm_feature_set fpu_neon_ext_v8_1
=
293 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
295 static int mfloat_abi_opt
= -1;
296 /* Record user cpu selection for object attributes. */
297 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
298 /* Must be long enough to hold any of the names in arm_cpus. */
299 static char selected_cpu_name
[20];
301 extern FLONUM_TYPE generic_floating_point_number
;
303 /* Return if no cpu was selected on command-line. */
305 no_cpu_selected (void)
307 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
312 static int meabi_flags
= EABI_DEFAULT
;
314 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
317 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
322 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
327 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
328 symbolS
* GOT_symbol
;
331 /* 0: assemble for ARM,
332 1: assemble for Thumb,
333 2: assemble for Thumb even though target CPU does not support thumb
335 static int thumb_mode
= 0;
336 /* A value distinct from the possible values for thumb_mode that we
337 can use to record whether thumb_mode has been copied into the
338 tc_frag_data field of a frag. */
339 #define MODE_RECORDED (1 << 4)
341 /* Specifies the intrinsic IT insn behavior mode. */
342 enum implicit_it_mode
344 IMPLICIT_IT_MODE_NEVER
= 0x00,
345 IMPLICIT_IT_MODE_ARM
= 0x01,
346 IMPLICIT_IT_MODE_THUMB
= 0x02,
347 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
349 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
351 /* If unified_syntax is true, we are processing the new unified
352 ARM/Thumb syntax. Important differences from the old ARM mode:
354 - Immediate operands do not require a # prefix.
355 - Conditional affixes always appear at the end of the
356 instruction. (For backward compatibility, those instructions
357 that formerly had them in the middle, continue to accept them
359 - The IT instruction may appear, and if it does is validated
360 against subsequent conditional affixes. It does not generate
363 Important differences from the old Thumb mode:
365 - Immediate operands do not require a # prefix.
366 - Most of the V6T2 instructions are only available in unified mode.
367 - The .N and .W suffixes are recognized and honored (it is an error
368 if they cannot be honored).
369 - All instructions set the flags if and only if they have an 's' affix.
370 - Conditional affixes may be used. They are validated against
371 preceding IT instructions. Unlike ARM mode, you cannot use a
372 conditional affix except in the scope of an IT instruction. */
374 static bfd_boolean unified_syntax
= FALSE
;
376 /* An immediate operand can start with #, and ld*, st*, pld operands
377 can contain [ and ]. We need to tell APP not to elide whitespace
378 before a [, which can appear as the first operand for pld.
379 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
380 const char arm_symbol_chars
[] = "#[]{}";
395 enum neon_el_type type
;
399 #define NEON_MAX_TYPE_ELS 4
403 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
407 enum it_instruction_type
412 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
413 if inside, should be the last one. */
414 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
415 i.e. BKPT and NOP. */
416 IT_INSN
/* The IT insn has been parsed. */
419 /* The maximum number of operands we need. */
420 #define ARM_IT_MAX_OPERANDS 6
425 unsigned long instruction
;
429 /* "uncond_value" is set to the value in place of the conditional field in
430 unconditional versions of the instruction, or -1 if nothing is
433 struct neon_type vectype
;
434 /* This does not indicate an actual NEON instruction, only that
435 the mnemonic accepts neon-style type suffixes. */
437 /* Set to the opcode if the instruction needs relaxation.
438 Zero if the instruction is not relaxed. */
442 bfd_reloc_code_real_type type
;
447 enum it_instruction_type it_insn_type
;
453 struct neon_type_el vectype
;
454 unsigned present
: 1; /* Operand present. */
455 unsigned isreg
: 1; /* Operand was a register. */
456 unsigned immisreg
: 1; /* .imm field is a second register. */
457 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
458 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
459 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
460 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
461 instructions. This allows us to disambiguate ARM <-> vector insns. */
462 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
463 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
464 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
465 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
466 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
467 unsigned writeback
: 1; /* Operand has trailing ! */
468 unsigned preind
: 1; /* Preindexed address. */
469 unsigned postind
: 1; /* Postindexed address. */
470 unsigned negative
: 1; /* Index register was negated. */
471 unsigned shifted
: 1; /* Shift applied to operation. */
472 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
473 } operands
[ARM_IT_MAX_OPERANDS
];
476 static struct arm_it inst
;
478 #define NUM_FLOAT_VALS 8
480 const char * fp_const
[] =
482 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
485 /* Number of littlenums required to hold an extended precision number. */
486 #define MAX_LITTLENUMS 6
488 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
498 #define CP_T_X 0x00008000
499 #define CP_T_Y 0x00400000
501 #define CONDS_BIT 0x00100000
502 #define LOAD_BIT 0x00100000
504 #define DOUBLE_LOAD_FLAG 0x00000001
508 const char * template_name
;
512 #define COND_ALWAYS 0xE
516 const char * template_name
;
520 struct asm_barrier_opt
522 const char * template_name
;
524 const arm_feature_set arch
;
527 /* The bit that distinguishes CPSR and SPSR. */
528 #define SPSR_BIT (1 << 22)
530 /* The individual PSR flag bits. */
531 #define PSR_c (1 << 16)
532 #define PSR_x (1 << 17)
533 #define PSR_s (1 << 18)
534 #define PSR_f (1 << 19)
539 bfd_reloc_code_real_type reloc
;
544 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
545 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
550 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
553 /* Bits for DEFINED field in neon_typed_alias. */
554 #define NTA_HASTYPE 1
555 #define NTA_HASINDEX 2
557 struct neon_typed_alias
559 unsigned char defined
;
561 struct neon_type_el eltype
;
564 /* ARM register categories. This includes coprocessor numbers and various
565 architecture extensions' registers. */
592 /* Structure for a hash table entry for a register.
593 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
594 information which states whether a vector type or index is specified (for a
595 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
601 unsigned char builtin
;
602 struct neon_typed_alias
* neon
;
605 /* Diagnostics used when we don't get a register of the expected type. */
606 const char * const reg_expected_msgs
[] =
608 N_("ARM register expected"),
609 N_("bad or missing co-processor number"),
610 N_("co-processor register expected"),
611 N_("FPA register expected"),
612 N_("VFP single precision register expected"),
613 N_("VFP/Neon double precision register expected"),
614 N_("Neon quad precision register expected"),
615 N_("VFP single or double precision register expected"),
616 N_("Neon double or quad precision register expected"),
617 N_("VFP single, double or Neon quad precision register expected"),
618 N_("VFP system register expected"),
619 N_("Maverick MVF register expected"),
620 N_("Maverick MVD register expected"),
621 N_("Maverick MVFX register expected"),
622 N_("Maverick MVDX register expected"),
623 N_("Maverick MVAX register expected"),
624 N_("Maverick DSPSC register expected"),
625 N_("iWMMXt data register expected"),
626 N_("iWMMXt control register expected"),
627 N_("iWMMXt scalar register expected"),
628 N_("XScale accumulator register expected"),
631 /* Some well known registers that we refer to directly elsewhere. */
637 /* ARM instructions take 4bytes in the object file, Thumb instructions
643 /* Basic string to match. */
644 const char * template_name
;
646 /* Parameters to instruction. */
647 unsigned int operands
[8];
649 /* Conditional tag - see opcode_lookup. */
650 unsigned int tag
: 4;
652 /* Basic instruction code. */
653 unsigned int avalue
: 28;
655 /* Thumb-format instruction code. */
658 /* Which architecture variant provides this instruction. */
659 const arm_feature_set
* avariant
;
660 const arm_feature_set
* tvariant
;
662 /* Function to call to encode instruction in ARM format. */
663 void (* aencode
) (void);
665 /* Function to call to encode instruction in Thumb format. */
666 void (* tencode
) (void);
669 /* Defines for various bits that we will want to toggle. */
670 #define INST_IMMEDIATE 0x02000000
671 #define OFFSET_REG 0x02000000
672 #define HWOFFSET_IMM 0x00400000
673 #define SHIFT_BY_REG 0x00000010
674 #define PRE_INDEX 0x01000000
675 #define INDEX_UP 0x00800000
676 #define WRITE_BACK 0x00200000
677 #define LDM_TYPE_2_OR_3 0x00400000
678 #define CPSI_MMOD 0x00020000
680 #define LITERAL_MASK 0xf000f000
681 #define OPCODE_MASK 0xfe1fffff
682 #define V4_STR_BIT 0x00000020
683 #define VLDR_VMOV_SAME 0x0040f000
685 #define T2_SUBS_PC_LR 0xf3de8f00
687 #define DATA_OP_SHIFT 21
688 #define SBIT_SHIFT 20
690 #define T2_OPCODE_MASK 0xfe1fffff
691 #define T2_DATA_OP_SHIFT 21
692 #define T2_SBIT_SHIFT 20
694 #define A_COND_MASK 0xf0000000
695 #define A_PUSH_POP_OP_MASK 0x0fff0000
697 /* Opcodes for pushing/popping registers to/from the stack. */
698 #define A1_OPCODE_PUSH 0x092d0000
699 #define A2_OPCODE_PUSH 0x052d0004
700 #define A2_OPCODE_POP 0x049d0004
702 /* Codes to distinguish the arithmetic instructions. */
713 #define OPCODE_CMP 10
714 #define OPCODE_CMN 11
715 #define OPCODE_ORR 12
716 #define OPCODE_MOV 13
717 #define OPCODE_BIC 14
718 #define OPCODE_MVN 15
720 #define T2_OPCODE_AND 0
721 #define T2_OPCODE_BIC 1
722 #define T2_OPCODE_ORR 2
723 #define T2_OPCODE_ORN 3
724 #define T2_OPCODE_EOR 4
725 #define T2_OPCODE_ADD 8
726 #define T2_OPCODE_ADC 10
727 #define T2_OPCODE_SBC 11
728 #define T2_OPCODE_SUB 13
729 #define T2_OPCODE_RSB 14
731 #define T_OPCODE_MUL 0x4340
732 #define T_OPCODE_TST 0x4200
733 #define T_OPCODE_CMN 0x42c0
734 #define T_OPCODE_NEG 0x4240
735 #define T_OPCODE_MVN 0x43c0
737 #define T_OPCODE_ADD_R3 0x1800
738 #define T_OPCODE_SUB_R3 0x1a00
739 #define T_OPCODE_ADD_HI 0x4400
740 #define T_OPCODE_ADD_ST 0xb000
741 #define T_OPCODE_SUB_ST 0xb080
742 #define T_OPCODE_ADD_SP 0xa800
743 #define T_OPCODE_ADD_PC 0xa000
744 #define T_OPCODE_ADD_I8 0x3000
745 #define T_OPCODE_SUB_I8 0x3800
746 #define T_OPCODE_ADD_I3 0x1c00
747 #define T_OPCODE_SUB_I3 0x1e00
749 #define T_OPCODE_ASR_R 0x4100
750 #define T_OPCODE_LSL_R 0x4080
751 #define T_OPCODE_LSR_R 0x40c0
752 #define T_OPCODE_ROR_R 0x41c0
753 #define T_OPCODE_ASR_I 0x1000
754 #define T_OPCODE_LSL_I 0x0000
755 #define T_OPCODE_LSR_I 0x0800
757 #define T_OPCODE_MOV_I8 0x2000
758 #define T_OPCODE_CMP_I8 0x2800
759 #define T_OPCODE_CMP_LR 0x4280
760 #define T_OPCODE_MOV_HR 0x4600
761 #define T_OPCODE_CMP_HR 0x4500
763 #define T_OPCODE_LDR_PC 0x4800
764 #define T_OPCODE_LDR_SP 0x9800
765 #define T_OPCODE_STR_SP 0x9000
766 #define T_OPCODE_LDR_IW 0x6800
767 #define T_OPCODE_STR_IW 0x6000
768 #define T_OPCODE_LDR_IH 0x8800
769 #define T_OPCODE_STR_IH 0x8000
770 #define T_OPCODE_LDR_IB 0x7800
771 #define T_OPCODE_STR_IB 0x7000
772 #define T_OPCODE_LDR_RW 0x5800
773 #define T_OPCODE_STR_RW 0x5000
774 #define T_OPCODE_LDR_RH 0x5a00
775 #define T_OPCODE_STR_RH 0x5200
776 #define T_OPCODE_LDR_RB 0x5c00
777 #define T_OPCODE_STR_RB 0x5400
779 #define T_OPCODE_PUSH 0xb400
780 #define T_OPCODE_POP 0xbc00
782 #define T_OPCODE_BRANCH 0xe000
784 #define THUMB_SIZE 2 /* Size of thumb instruction. */
785 #define THUMB_PP_PC_LR 0x0100
786 #define THUMB_LOAD_BIT 0x0800
787 #define THUMB2_LOAD_BIT 0x00100000
789 #define BAD_ARGS _("bad arguments to instruction")
790 #define BAD_SP _("r13 not allowed here")
791 #define BAD_PC _("r15 not allowed here")
792 #define BAD_COND _("instruction cannot be conditional")
793 #define BAD_OVERLAP _("registers may not be the same")
794 #define BAD_HIREG _("lo register required")
795 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
796 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
797 #define BAD_BRANCH _("branch must be last instruction in IT block")
798 #define BAD_NOT_IT _("instruction not allowed in IT block")
799 #define BAD_FPU _("selected FPU does not support instruction")
800 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
801 #define BAD_IT_COND _("incorrect condition in IT block")
802 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
803 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
804 #define BAD_PC_ADDRESSING \
805 _("cannot use register index with PC-relative addressing")
806 #define BAD_PC_WRITEBACK \
807 _("cannot use writeback with PC-relative addressing")
808 #define BAD_RANGE _("branch out of range")
809 #define BAD_FP16 _("selected processor does not support fp16 instruction")
810 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
811 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
813 static struct hash_control
* arm_ops_hsh
;
814 static struct hash_control
* arm_cond_hsh
;
815 static struct hash_control
* arm_shift_hsh
;
816 static struct hash_control
* arm_psr_hsh
;
817 static struct hash_control
* arm_v7m_psr_hsh
;
818 static struct hash_control
* arm_reg_hsh
;
819 static struct hash_control
* arm_reloc_hsh
;
820 static struct hash_control
* arm_barrier_opt_hsh
;
822 /* Stuff needed to resolve the label ambiguity
831 symbolS
* last_label_seen
;
832 static int label_is_thumb_function_name
= FALSE
;
834 /* Literal pool structure. Held on a per-section
835 and per-sub-section basis. */
837 #define MAX_LITERAL_POOL_SIZE 1024
838 typedef struct literal_pool
840 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
841 unsigned int next_free_entry
;
847 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
849 struct literal_pool
* next
;
850 unsigned int alignment
;
853 /* Pointer to a linked list of literal pools. */
854 literal_pool
* list_of_pools
= NULL
;
856 typedef enum asmfunc_states
859 WAITING_ASMFUNC_NAME
,
863 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
866 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
868 static struct current_it now_it
;
872 now_it_compatible (int cond
)
874 return (cond
& ~1) == (now_it
.cc
& ~1);
878 conditional_insn (void)
880 return inst
.cond
!= COND_ALWAYS
;
883 static int in_it_block (void);
885 static int handle_it_state (void);
887 static void force_automatic_it_block_close (void);
889 static void it_fsm_post_encode (void);
891 #define set_it_insn_type(type) \
894 inst.it_insn_type = type; \
895 if (handle_it_state () == FAIL) \
900 #define set_it_insn_type_nonvoid(type, failret) \
903 inst.it_insn_type = type; \
904 if (handle_it_state () == FAIL) \
909 #define set_it_insn_type_last() \
912 if (inst.cond == COND_ALWAYS) \
913 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
915 set_it_insn_type (INSIDE_IT_LAST_INSN); \
921 /* This array holds the chars that always start a comment. If the
922 pre-processor is disabled, these aren't very useful. */
923 char arm_comment_chars
[] = "@";
925 /* This array holds the chars that only start a comment at the beginning of
926 a line. If the line seems to have the form '# 123 filename'
927 .line and .file directives will appear in the pre-processed output. */
928 /* Note that input_file.c hand checks for '#' at the beginning of the
929 first line of the input file. This is because the compiler outputs
930 #NO_APP at the beginning of its output. */
931 /* Also note that comments like this one will always work. */
932 const char line_comment_chars
[] = "#";
934 char arm_line_separator_chars
[] = ";";
936 /* Chars that can be used to separate mant
937 from exp in floating point numbers. */
938 const char EXP_CHARS
[] = "eE";
940 /* Chars that mean this number is a floating point constant. */
944 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
946 /* Prefix characters that indicate the start of an immediate
948 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
950 /* Separator character handling. */
952 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
955 skip_past_char (char ** str
, char c
)
957 /* PR gas/14987: Allow for whitespace before the expected character. */
958 skip_whitespace (*str
);
969 #define skip_past_comma(str) skip_past_char (str, ',')
971 /* Arithmetic expressions (possibly involving symbols). */
973 /* Return TRUE if anything in the expression is a bignum. */
976 walk_no_bignums (symbolS
* sp
)
978 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
981 if (symbol_get_value_expression (sp
)->X_add_symbol
)
983 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
984 || (symbol_get_value_expression (sp
)->X_op_symbol
985 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
991 static int in_my_get_expression
= 0;
993 /* Third argument to my_get_expression. */
994 #define GE_NO_PREFIX 0
995 #define GE_IMM_PREFIX 1
996 #define GE_OPT_PREFIX 2
997 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
998 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
999 #define GE_OPT_PREFIX_BIG 3
1002 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1007 /* In unified syntax, all prefixes are optional. */
1009 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1012 switch (prefix_mode
)
1014 case GE_NO_PREFIX
: break;
1016 if (!is_immediate_prefix (**str
))
1018 inst
.error
= _("immediate expression requires a # prefix");
1024 case GE_OPT_PREFIX_BIG
:
1025 if (is_immediate_prefix (**str
))
1031 memset (ep
, 0, sizeof (expressionS
));
1033 save_in
= input_line_pointer
;
1034 input_line_pointer
= *str
;
1035 in_my_get_expression
= 1;
1036 seg
= expression (ep
);
1037 in_my_get_expression
= 0;
1039 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1041 /* We found a bad or missing expression in md_operand(). */
1042 *str
= input_line_pointer
;
1043 input_line_pointer
= save_in
;
1044 if (inst
.error
== NULL
)
1045 inst
.error
= (ep
->X_op
== O_absent
1046 ? _("missing expression") :_("bad expression"));
1051 if (seg
!= absolute_section
1052 && seg
!= text_section
1053 && seg
!= data_section
1054 && seg
!= bss_section
1055 && seg
!= undefined_section
)
1057 inst
.error
= _("bad segment");
1058 *str
= input_line_pointer
;
1059 input_line_pointer
= save_in
;
1066 /* Get rid of any bignums now, so that we don't generate an error for which
1067 we can't establish a line number later on. Big numbers are never valid
1068 in instructions, which is where this routine is always called. */
1069 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1070 && (ep
->X_op
== O_big
1071 || (ep
->X_add_symbol
1072 && (walk_no_bignums (ep
->X_add_symbol
)
1074 && walk_no_bignums (ep
->X_op_symbol
))))))
1076 inst
.error
= _("invalid constant");
1077 *str
= input_line_pointer
;
1078 input_line_pointer
= save_in
;
1082 *str
= input_line_pointer
;
1083 input_line_pointer
= save_in
;
1087 /* Turn a string in input_line_pointer into a floating point constant
1088 of type TYPE, and store the appropriate bytes in *LITP. The number
1089 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1090 returned, or NULL on OK.
1092 Note that fp constants aren't represented in the normal way on the ARM.
1093 In big endian mode, things are as expected. However, in little endian
1094 mode fp constants are big-endian word-wise, and little-endian byte-wise
1095 within the words. For example, (double) 1.1 in big endian mode is
1096 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1097 the byte sequence 99 99 f1 3f 9a 99 99 99.
1099 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1102 md_atof (int type
, char * litP
, int * sizeP
)
1105 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1137 return _("Unrecognized or unsupported floating point constant");
1140 t
= atof_ieee (input_line_pointer
, type
, words
);
1142 input_line_pointer
= t
;
1143 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1145 if (target_big_endian
)
1147 for (i
= 0; i
< prec
; i
++)
1149 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1150 litP
+= sizeof (LITTLENUM_TYPE
);
1155 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1156 for (i
= prec
- 1; i
>= 0; i
--)
1158 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1159 litP
+= sizeof (LITTLENUM_TYPE
);
1162 /* For a 4 byte float the order of elements in `words' is 1 0.
1163 For an 8 byte float the order is 1 0 3 2. */
1164 for (i
= 0; i
< prec
; i
+= 2)
1166 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1167 sizeof (LITTLENUM_TYPE
));
1168 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1169 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1170 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1177 /* We handle all bad expressions here, so that we can report the faulty
1178 instruction in the error message. */
1180 md_operand (expressionS
* exp
)
1182 if (in_my_get_expression
)
1183 exp
->X_op
= O_illegal
;
1186 /* Immediate values. */
1188 /* Generic immediate-value read function for use in directives.
1189 Accepts anything that 'expression' can fold to a constant.
1190 *val receives the number. */
1193 immediate_for_directive (int *val
)
1196 exp
.X_op
= O_illegal
;
1198 if (is_immediate_prefix (*input_line_pointer
))
1200 input_line_pointer
++;
1204 if (exp
.X_op
!= O_constant
)
1206 as_bad (_("expected #constant"));
1207 ignore_rest_of_line ();
1210 *val
= exp
.X_add_number
;
1215 /* Register parsing. */
1217 /* Generic register parser. CCP points to what should be the
1218 beginning of a register name. If it is indeed a valid register
1219 name, advance CCP over it and return the reg_entry structure;
1220 otherwise return NULL. Does not issue diagnostics. */
1222 static struct reg_entry
*
1223 arm_reg_parse_multi (char **ccp
)
1227 struct reg_entry
*reg
;
1229 skip_whitespace (start
);
1231 #ifdef REGISTER_PREFIX
1232 if (*start
!= REGISTER_PREFIX
)
1236 #ifdef OPTIONAL_REGISTER_PREFIX
1237 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1242 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1247 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1249 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1259 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1260 enum arm_reg_type type
)
1262 /* Alternative syntaxes are accepted for a few register classes. */
1269 /* Generic coprocessor register names are allowed for these. */
1270 if (reg
&& reg
->type
== REG_TYPE_CN
)
1275 /* For backward compatibility, a bare number is valid here. */
1277 unsigned long processor
= strtoul (start
, ccp
, 10);
1278 if (*ccp
!= start
&& processor
<= 15)
1283 case REG_TYPE_MMXWC
:
1284 /* WC includes WCG. ??? I'm not sure this is true for all
1285 instructions that take WC registers. */
1286 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1297 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1298 return value is the register number or FAIL. */
1301 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1304 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1307 /* Do not allow a scalar (reg+index) to parse as a register. */
1308 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1311 if (reg
&& reg
->type
== type
)
1314 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1321 /* Parse a Neon type specifier. *STR should point at the leading '.'
1322 character. Does no verification at this stage that the type fits the opcode
1329 Can all be legally parsed by this function.
1331 Fills in neon_type struct pointer with parsed information, and updates STR
1332 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1333 type, FAIL if not. */
1336 parse_neon_type (struct neon_type
*type
, char **str
)
1343 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1345 enum neon_el_type thistype
= NT_untyped
;
1346 unsigned thissize
= -1u;
1353 /* Just a size without an explicit type. */
1357 switch (TOLOWER (*ptr
))
1359 case 'i': thistype
= NT_integer
; break;
1360 case 'f': thistype
= NT_float
; break;
1361 case 'p': thistype
= NT_poly
; break;
1362 case 's': thistype
= NT_signed
; break;
1363 case 'u': thistype
= NT_unsigned
; break;
1365 thistype
= NT_float
;
1370 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1376 /* .f is an abbreviation for .f32. */
1377 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1382 thissize
= strtoul (ptr
, &ptr
, 10);
1384 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1387 as_bad (_("bad size %d in type specifier"), thissize
);
1395 type
->el
[type
->elems
].type
= thistype
;
1396 type
->el
[type
->elems
].size
= thissize
;
1401 /* Empty/missing type is not a successful parse. */
1402 if (type
->elems
== 0)
1410 /* Errors may be set multiple times during parsing or bit encoding
1411 (particularly in the Neon bits), but usually the earliest error which is set
1412 will be the most meaningful. Avoid overwriting it with later (cascading)
1413 errors by calling this function. */
1416 first_error (const char *err
)
1422 /* Parse a single type, e.g. ".s32", leading period included. */
1424 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1427 struct neon_type optype
;
1431 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1433 if (optype
.elems
== 1)
1434 *vectype
= optype
.el
[0];
1437 first_error (_("only one type should be specified for operand"));
1443 first_error (_("vector type expected"));
1455 /* Special meanings for indices (which have a range of 0-7), which will fit into
1458 #define NEON_ALL_LANES 15
1459 #define NEON_INTERLEAVE_LANES 14
1461 /* Parse either a register or a scalar, with an optional type. Return the
1462 register number, and optionally fill in the actual type of the register
1463 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1464 type/index information in *TYPEINFO. */
1467 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1468 enum arm_reg_type
*rtype
,
1469 struct neon_typed_alias
*typeinfo
)
1472 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1473 struct neon_typed_alias atype
;
1474 struct neon_type_el parsetype
;
1478 atype
.eltype
.type
= NT_invtype
;
1479 atype
.eltype
.size
= -1;
1481 /* Try alternate syntax for some types of register. Note these are mutually
1482 exclusive with the Neon syntax extensions. */
1485 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1493 /* Undo polymorphism when a set of register types may be accepted. */
1494 if ((type
== REG_TYPE_NDQ
1495 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1496 || (type
== REG_TYPE_VFSD
1497 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1498 || (type
== REG_TYPE_NSDQ
1499 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1500 || reg
->type
== REG_TYPE_NQ
))
1501 || (type
== REG_TYPE_MMXWC
1502 && (reg
->type
== REG_TYPE_MMXWCG
)))
1503 type
= (enum arm_reg_type
) reg
->type
;
1505 if (type
!= reg
->type
)
1511 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1513 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1515 first_error (_("can't redefine type for operand"));
1518 atype
.defined
|= NTA_HASTYPE
;
1519 atype
.eltype
= parsetype
;
1522 if (skip_past_char (&str
, '[') == SUCCESS
)
1524 if (type
!= REG_TYPE_VFD
)
1526 first_error (_("only D registers may be indexed"));
1530 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1532 first_error (_("can't change index for operand"));
1536 atype
.defined
|= NTA_HASINDEX
;
1538 if (skip_past_char (&str
, ']') == SUCCESS
)
1539 atype
.index
= NEON_ALL_LANES
;
1544 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1546 if (exp
.X_op
!= O_constant
)
1548 first_error (_("constant expression required"));
1552 if (skip_past_char (&str
, ']') == FAIL
)
1555 atype
.index
= exp
.X_add_number
;
1570 /* Like arm_reg_parse, but allow allow the following extra features:
1571 - If RTYPE is non-zero, return the (possibly restricted) type of the
1572 register (e.g. Neon double or quad reg when either has been requested).
1573 - If this is a Neon vector type with additional type information, fill
1574 in the struct pointed to by VECTYPE (if non-NULL).
1575 This function will fault on encountering a scalar. */
1578 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1579 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1581 struct neon_typed_alias atype
;
1583 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1588 /* Do not allow regname(... to parse as a register. */
1592 /* Do not allow a scalar (reg+index) to parse as a register. */
1593 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1595 first_error (_("register operand expected, but got scalar"));
1600 *vectype
= atype
.eltype
;
1607 #define NEON_SCALAR_REG(X) ((X) >> 4)
1608 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1610 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1611 have enough information to be able to do a good job bounds-checking. So, we
1612 just do easy checks here, and do further checks later. */
1615 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1619 struct neon_typed_alias atype
;
1621 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1623 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1626 if (atype
.index
== NEON_ALL_LANES
)
1628 first_error (_("scalar must have an index"));
1631 else if (atype
.index
>= 64 / elsize
)
1633 first_error (_("scalar index out of range"));
1638 *type
= atype
.eltype
;
1642 return reg
* 16 + atype
.index
;
1645 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1648 parse_reg_list (char ** strp
)
1650 char * str
= * strp
;
1654 /* We come back here if we get ranges concatenated by '+' or '|'. */
1657 skip_whitespace (str
);
1671 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1673 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1683 first_error (_("bad range in register list"));
1687 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1689 if (range
& (1 << i
))
1691 (_("Warning: duplicated register (r%d) in register list"),
1699 if (range
& (1 << reg
))
1700 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1702 else if (reg
<= cur_reg
)
1703 as_tsktsk (_("Warning: register range not in ascending order"));
1708 while (skip_past_comma (&str
) != FAIL
1709 || (in_range
= 1, *str
++ == '-'));
1712 if (skip_past_char (&str
, '}') == FAIL
)
1714 first_error (_("missing `}'"));
1722 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1725 if (exp
.X_op
== O_constant
)
1727 if (exp
.X_add_number
1728 != (exp
.X_add_number
& 0x0000ffff))
1730 inst
.error
= _("invalid register mask");
1734 if ((range
& exp
.X_add_number
) != 0)
1736 int regno
= range
& exp
.X_add_number
;
1739 regno
= (1 << regno
) - 1;
1741 (_("Warning: duplicated register (r%d) in register list"),
1745 range
|= exp
.X_add_number
;
1749 if (inst
.reloc
.type
!= 0)
1751 inst
.error
= _("expression too complex");
1755 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1756 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1757 inst
.reloc
.pc_rel
= 0;
1761 if (*str
== '|' || *str
== '+')
1767 while (another_range
);
1773 /* Types of registers in a list. */
1782 /* Parse a VFP register list. If the string is invalid return FAIL.
1783 Otherwise return the number of registers, and set PBASE to the first
1784 register. Parses registers of type ETYPE.
1785 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1786 - Q registers can be used to specify pairs of D registers
1787 - { } can be omitted from around a singleton register list
1788 FIXME: This is not implemented, as it would require backtracking in
1791 This could be done (the meaning isn't really ambiguous), but doesn't
1792 fit in well with the current parsing framework.
1793 - 32 D registers may be used (also true for VFPv3).
1794 FIXME: Types are ignored in these register lists, which is probably a
1798 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1803 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1807 unsigned long mask
= 0;
1810 if (skip_past_char (&str
, '{') == FAIL
)
1812 inst
.error
= _("expecting {");
1819 regtype
= REG_TYPE_VFS
;
1824 regtype
= REG_TYPE_VFD
;
1827 case REGLIST_NEON_D
:
1828 regtype
= REG_TYPE_NDQ
;
1832 if (etype
!= REGLIST_VFP_S
)
1834 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1835 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1839 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1842 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1849 base_reg
= max_regs
;
1853 int setmask
= 1, addregs
= 1;
1855 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1857 if (new_base
== FAIL
)
1859 first_error (_(reg_expected_msgs
[regtype
]));
1863 if (new_base
>= max_regs
)
1865 first_error (_("register out of range in list"));
1869 /* Note: a value of 2 * n is returned for the register Q<n>. */
1870 if (regtype
== REG_TYPE_NQ
)
1876 if (new_base
< base_reg
)
1877 base_reg
= new_base
;
1879 if (mask
& (setmask
<< new_base
))
1881 first_error (_("invalid register list"));
1885 if ((mask
>> new_base
) != 0 && ! warned
)
1887 as_tsktsk (_("register list not in ascending order"));
1891 mask
|= setmask
<< new_base
;
1894 if (*str
== '-') /* We have the start of a range expression */
1900 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1903 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1907 if (high_range
>= max_regs
)
1909 first_error (_("register out of range in list"));
1913 if (regtype
== REG_TYPE_NQ
)
1914 high_range
= high_range
+ 1;
1916 if (high_range
<= new_base
)
1918 inst
.error
= _("register range not in ascending order");
1922 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1924 if (mask
& (setmask
<< new_base
))
1926 inst
.error
= _("invalid register list");
1930 mask
|= setmask
<< new_base
;
1935 while (skip_past_comma (&str
) != FAIL
);
1939 /* Sanity check -- should have raised a parse error above. */
1940 if (count
== 0 || count
> max_regs
)
1945 /* Final test -- the registers must be consecutive. */
1947 for (i
= 0; i
< count
; i
++)
1949 if ((mask
& (1u << i
)) == 0)
1951 inst
.error
= _("non-contiguous register range");
1961 /* True if two alias types are the same. */
1964 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1972 if (a
->defined
!= b
->defined
)
1975 if ((a
->defined
& NTA_HASTYPE
) != 0
1976 && (a
->eltype
.type
!= b
->eltype
.type
1977 || a
->eltype
.size
!= b
->eltype
.size
))
1980 if ((a
->defined
& NTA_HASINDEX
) != 0
1981 && (a
->index
!= b
->index
))
1987 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1988 The base register is put in *PBASE.
1989 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1991 The register stride (minus one) is put in bit 4 of the return value.
1992 Bits [6:5] encode the list length (minus one).
1993 The type of the list elements is put in *ELTYPE, if non-NULL. */
1995 #define NEON_LANE(X) ((X) & 0xf)
1996 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1997 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2000 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2001 struct neon_type_el
*eltype
)
2008 int leading_brace
= 0;
2009 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2010 const char *const incr_error
= _("register stride must be 1 or 2");
2011 const char *const type_error
= _("mismatched element/structure types in list");
2012 struct neon_typed_alias firsttype
;
2013 firsttype
.defined
= 0;
2014 firsttype
.eltype
.type
= NT_invtype
;
2015 firsttype
.eltype
.size
= -1;
2016 firsttype
.index
= -1;
2018 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2023 struct neon_typed_alias atype
;
2024 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2028 first_error (_(reg_expected_msgs
[rtype
]));
2035 if (rtype
== REG_TYPE_NQ
)
2041 else if (reg_incr
== -1)
2043 reg_incr
= getreg
- base_reg
;
2044 if (reg_incr
< 1 || reg_incr
> 2)
2046 first_error (_(incr_error
));
2050 else if (getreg
!= base_reg
+ reg_incr
* count
)
2052 first_error (_(incr_error
));
2056 if (! neon_alias_types_same (&atype
, &firsttype
))
2058 first_error (_(type_error
));
2062 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2066 struct neon_typed_alias htype
;
2067 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2069 lane
= NEON_INTERLEAVE_LANES
;
2070 else if (lane
!= NEON_INTERLEAVE_LANES
)
2072 first_error (_(type_error
));
2077 else if (reg_incr
!= 1)
2079 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2083 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2086 first_error (_(reg_expected_msgs
[rtype
]));
2089 if (! neon_alias_types_same (&htype
, &firsttype
))
2091 first_error (_(type_error
));
2094 count
+= hireg
+ dregs
- getreg
;
2098 /* If we're using Q registers, we can't use [] or [n] syntax. */
2099 if (rtype
== REG_TYPE_NQ
)
2105 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2109 else if (lane
!= atype
.index
)
2111 first_error (_(type_error
));
2115 else if (lane
== -1)
2116 lane
= NEON_INTERLEAVE_LANES
;
2117 else if (lane
!= NEON_INTERLEAVE_LANES
)
2119 first_error (_(type_error
));
2124 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2126 /* No lane set by [x]. We must be interleaving structures. */
2128 lane
= NEON_INTERLEAVE_LANES
;
2131 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2132 || (count
> 1 && reg_incr
== -1))
2134 first_error (_("error parsing element/structure list"));
2138 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2140 first_error (_("expected }"));
2148 *eltype
= firsttype
.eltype
;
2153 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2156 /* Parse an explicit relocation suffix on an expression. This is
2157 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2158 arm_reloc_hsh contains no entries, so this function can only
2159 succeed if there is no () after the word. Returns -1 on error,
2160 BFD_RELOC_UNUSED if there wasn't any suffix. */
2163 parse_reloc (char **str
)
2165 struct reloc_entry
*r
;
2169 return BFD_RELOC_UNUSED
;
2174 while (*q
&& *q
!= ')' && *q
!= ',')
2179 if ((r
= (struct reloc_entry
*)
2180 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2187 /* Directives: register aliases. */
2189 static struct reg_entry
*
2190 insert_reg_alias (char *str
, unsigned number
, int type
)
2192 struct reg_entry
*new_reg
;
2195 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2197 if (new_reg
->builtin
)
2198 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2200 /* Only warn about a redefinition if it's not defined as the
2202 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2203 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2208 name
= xstrdup (str
);
2209 new_reg
= XNEW (struct reg_entry
);
2211 new_reg
->name
= name
;
2212 new_reg
->number
= number
;
2213 new_reg
->type
= type
;
2214 new_reg
->builtin
= FALSE
;
2215 new_reg
->neon
= NULL
;
2217 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2224 insert_neon_reg_alias (char *str
, int number
, int type
,
2225 struct neon_typed_alias
*atype
)
2227 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2231 first_error (_("attempt to redefine typed alias"));
2237 reg
->neon
= XNEW (struct neon_typed_alias
);
2238 *reg
->neon
= *atype
;
2242 /* Look for the .req directive. This is of the form:
2244 new_register_name .req existing_register_name
2246 If we find one, or if it looks sufficiently like one that we want to
2247 handle any error here, return TRUE. Otherwise return FALSE. */
2250 create_register_alias (char * newname
, char *p
)
2252 struct reg_entry
*old
;
2253 char *oldname
, *nbuf
;
2256 /* The input scrubber ensures that whitespace after the mnemonic is
2257 collapsed to single spaces. */
2259 if (strncmp (oldname
, " .req ", 6) != 0)
2263 if (*oldname
== '\0')
2266 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2269 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2273 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2274 the desired alias name, and p points to its end. If not, then
2275 the desired alias name is in the global original_case_string. */
2276 #ifdef TC_CASE_SENSITIVE
2279 newname
= original_case_string
;
2280 nlen
= strlen (newname
);
2283 nbuf
= xmemdup0 (newname
, nlen
);
2285 /* Create aliases under the new name as stated; an all-lowercase
2286 version of the new name; and an all-uppercase version of the new
2288 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2290 for (p
= nbuf
; *p
; p
++)
2293 if (strncmp (nbuf
, newname
, nlen
))
2295 /* If this attempt to create an additional alias fails, do not bother
2296 trying to create the all-lower case alias. We will fail and issue
2297 a second, duplicate error message. This situation arises when the
2298 programmer does something like:
2301 The second .req creates the "Foo" alias but then fails to create
2302 the artificial FOO alias because it has already been created by the
2304 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2311 for (p
= nbuf
; *p
; p
++)
2314 if (strncmp (nbuf
, newname
, nlen
))
2315 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2322 /* Create a Neon typed/indexed register alias using directives, e.g.:
2327 These typed registers can be used instead of the types specified after the
2328 Neon mnemonic, so long as all operands given have types. Types can also be
2329 specified directly, e.g.:
2330 vadd d0.s32, d1.s32, d2.s32 */
2333 create_neon_reg_alias (char *newname
, char *p
)
2335 enum arm_reg_type basetype
;
2336 struct reg_entry
*basereg
;
2337 struct reg_entry mybasereg
;
2338 struct neon_type ntype
;
2339 struct neon_typed_alias typeinfo
;
2340 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2343 typeinfo
.defined
= 0;
2344 typeinfo
.eltype
.type
= NT_invtype
;
2345 typeinfo
.eltype
.size
= -1;
2346 typeinfo
.index
= -1;
2350 if (strncmp (p
, " .dn ", 5) == 0)
2351 basetype
= REG_TYPE_VFD
;
2352 else if (strncmp (p
, " .qn ", 5) == 0)
2353 basetype
= REG_TYPE_NQ
;
2362 basereg
= arm_reg_parse_multi (&p
);
2364 if (basereg
&& basereg
->type
!= basetype
)
2366 as_bad (_("bad type for register"));
2370 if (basereg
== NULL
)
2373 /* Try parsing as an integer. */
2374 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2375 if (exp
.X_op
!= O_constant
)
2377 as_bad (_("expression must be constant"));
2380 basereg
= &mybasereg
;
2381 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2387 typeinfo
= *basereg
->neon
;
2389 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2391 /* We got a type. */
2392 if (typeinfo
.defined
& NTA_HASTYPE
)
2394 as_bad (_("can't redefine the type of a register alias"));
2398 typeinfo
.defined
|= NTA_HASTYPE
;
2399 if (ntype
.elems
!= 1)
2401 as_bad (_("you must specify a single type only"));
2404 typeinfo
.eltype
= ntype
.el
[0];
2407 if (skip_past_char (&p
, '[') == SUCCESS
)
2410 /* We got a scalar index. */
2412 if (typeinfo
.defined
& NTA_HASINDEX
)
2414 as_bad (_("can't redefine the index of a scalar alias"));
2418 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2420 if (exp
.X_op
!= O_constant
)
2422 as_bad (_("scalar index must be constant"));
2426 typeinfo
.defined
|= NTA_HASINDEX
;
2427 typeinfo
.index
= exp
.X_add_number
;
2429 if (skip_past_char (&p
, ']') == FAIL
)
2431 as_bad (_("expecting ]"));
2436 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2437 the desired alias name, and p points to its end. If not, then
2438 the desired alias name is in the global original_case_string. */
2439 #ifdef TC_CASE_SENSITIVE
2440 namelen
= nameend
- newname
;
2442 newname
= original_case_string
;
2443 namelen
= strlen (newname
);
2446 namebuf
= xmemdup0 (newname
, namelen
);
2448 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2449 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2451 /* Insert name in all uppercase. */
2452 for (p
= namebuf
; *p
; p
++)
2455 if (strncmp (namebuf
, newname
, namelen
))
2456 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2457 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2459 /* Insert name in all lowercase. */
2460 for (p
= namebuf
; *p
; p
++)
2463 if (strncmp (namebuf
, newname
, namelen
))
2464 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2465 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2471 /* Should never be called, as .req goes between the alias and the
2472 register name, not at the beginning of the line. */
2475 s_req (int a ATTRIBUTE_UNUSED
)
2477 as_bad (_("invalid syntax for .req directive"));
2481 s_dn (int a ATTRIBUTE_UNUSED
)
2483 as_bad (_("invalid syntax for .dn directive"));
2487 s_qn (int a ATTRIBUTE_UNUSED
)
2489 as_bad (_("invalid syntax for .qn directive"));
2492 /* The .unreq directive deletes an alias which was previously defined
2493 by .req. For example:
2499 s_unreq (int a ATTRIBUTE_UNUSED
)
2504 name
= input_line_pointer
;
2506 while (*input_line_pointer
!= 0
2507 && *input_line_pointer
!= ' '
2508 && *input_line_pointer
!= '\n')
2509 ++input_line_pointer
;
2511 saved_char
= *input_line_pointer
;
2512 *input_line_pointer
= 0;
2515 as_bad (_("invalid syntax for .unreq directive"));
2518 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2522 as_bad (_("unknown register alias '%s'"), name
);
2523 else if (reg
->builtin
)
2524 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2531 hash_delete (arm_reg_hsh
, name
, FALSE
);
2532 free ((char *) reg
->name
);
2537 /* Also locate the all upper case and all lower case versions.
2538 Do not complain if we cannot find one or the other as it
2539 was probably deleted above. */
2541 nbuf
= strdup (name
);
2542 for (p
= nbuf
; *p
; p
++)
2544 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2547 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2548 free ((char *) reg
->name
);
2554 for (p
= nbuf
; *p
; p
++)
2556 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2559 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2560 free ((char *) reg
->name
);
2570 *input_line_pointer
= saved_char
;
2571 demand_empty_rest_of_line ();
2574 /* Directives: Instruction set selection. */
2577 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2578 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2579 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2580 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2582 /* Create a new mapping symbol for the transition to STATE. */
2585 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2588 const char * symname
;
2595 type
= BSF_NO_FLAGS
;
2599 type
= BSF_NO_FLAGS
;
2603 type
= BSF_NO_FLAGS
;
2609 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2610 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2615 THUMB_SET_FUNC (symbolP
, 0);
2616 ARM_SET_THUMB (symbolP
, 0);
2617 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2621 THUMB_SET_FUNC (symbolP
, 1);
2622 ARM_SET_THUMB (symbolP
, 1);
2623 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2631 /* Save the mapping symbols for future reference. Also check that
2632 we do not place two mapping symbols at the same offset within a
2633 frag. We'll handle overlap between frags in
2634 check_mapping_symbols.
2636 If .fill or other data filling directive generates zero sized data,
2637 the mapping symbol for the following code will have the same value
2638 as the one generated for the data filling directive. In this case,
2639 we replace the old symbol with the new one at the same address. */
2642 if (frag
->tc_frag_data
.first_map
!= NULL
)
2644 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2645 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2647 frag
->tc_frag_data
.first_map
= symbolP
;
2649 if (frag
->tc_frag_data
.last_map
!= NULL
)
2651 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2652 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2653 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2655 frag
->tc_frag_data
.last_map
= symbolP
;
2658 /* We must sometimes convert a region marked as code to data during
2659 code alignment, if an odd number of bytes have to be padded. The
2660 code mapping symbol is pushed to an aligned address. */
2663 insert_data_mapping_symbol (enum mstate state
,
2664 valueT value
, fragS
*frag
, offsetT bytes
)
2666 /* If there was already a mapping symbol, remove it. */
2667 if (frag
->tc_frag_data
.last_map
!= NULL
2668 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2670 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2674 know (frag
->tc_frag_data
.first_map
== symp
);
2675 frag
->tc_frag_data
.first_map
= NULL
;
2677 frag
->tc_frag_data
.last_map
= NULL
;
2678 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2681 make_mapping_symbol (MAP_DATA
, value
, frag
);
2682 make_mapping_symbol (state
, value
+ bytes
, frag
);
2685 static void mapping_state_2 (enum mstate state
, int max_chars
);
2687 /* Set the mapping state to STATE. Only call this when about to
2688 emit some STATE bytes to the file. */
2690 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2692 mapping_state (enum mstate state
)
2694 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2696 if (mapstate
== state
)
2697 /* The mapping symbol has already been emitted.
2698 There is nothing else to do. */
2701 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2703 All ARM instructions require 4-byte alignment.
2704 (Almost) all Thumb instructions require 2-byte alignment.
2706 When emitting instructions into any section, mark the section
2709 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2710 but themselves require 2-byte alignment; this applies to some
2711 PC- relative forms. However, these cases will invovle implicit
2712 literal pool generation or an explicit .align >=2, both of
2713 which will cause the section to me marked with sufficient
2714 alignment. Thus, we don't handle those cases here. */
2715 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2717 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2718 /* This case will be evaluated later. */
2721 mapping_state_2 (state
, 0);
2724 /* Same as mapping_state, but MAX_CHARS bytes have already been
2725 allocated. Put the mapping symbol that far back. */
2728 mapping_state_2 (enum mstate state
, int max_chars
)
2730 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2732 if (!SEG_NORMAL (now_seg
))
2735 if (mapstate
== state
)
2736 /* The mapping symbol has already been emitted.
2737 There is nothing else to do. */
2740 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2741 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2743 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2744 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2747 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2750 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2751 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2755 #define mapping_state(x) ((void)0)
2756 #define mapping_state_2(x, y) ((void)0)
2759 /* Find the real, Thumb encoded start of a Thumb function. */
/* Looks up ".real_start_of<name>"; when that symbol does not exist the
   original symbol is used (with a warning), so branches still resolve.  */
2763 find_real_start (symbolS
* symbolP
)
2766 const char * name
= S_GET_NAME (symbolP
);
2767 symbolS
* new_target
;
2769 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2770 #define STUB_NAME ".real_start_of"
2775 /* The compiler may generate BL instructions to local labels because
2776 it needs to perform a branch to a far away location. These labels
2777 do not have a corresponding ".real_start_of" label. We check
2778 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2779 the ".real_start_of" convention for nonlocal branches. */
2780 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
/* Build the stub name and look it up in the symbol table.  */
2783 real_start
= concat (STUB_NAME
, name
, NULL
);
2784 new_target
= symbol_find (real_start
);
2787 if (new_target
== NULL
)
2789 as_warn (_("Failed to find real start of function: %s\n"), name
);
/* Fall back to the symbol we were given.  */
2790 new_target
= symbolP
;
/* Switch the assembler between ARM and Thumb instruction encoding.
   NOTE(review): presumably a switch on WIDTH (16 = Thumb, 32 = ARM);
   the case labels and mode assignments were lost in this extract —
   confirm against upstream.  */
2798 opcode_select (int width
)
/* Thumb requires at least ARMv4T.  */
2805 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2806 as_bad (_("selected processor does not support THUMB opcodes"));
2809 /* No need to force the alignment, since we will have been
2810 coming from ARM mode, which is word-aligned. */
2811 record_alignment (now_seg
, 1);
/* ARM requires the baseline instruction set.  */
2818 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2819 as_bad (_("selected processor does not support ARM opcodes"));
/* Coming from Thumb: force word (4-byte) alignment for ARM code.  */
2824 frag_align (2, 0, 0);
2826 record_alignment (now_seg
, 1);
/* Any width other than 16 or 32 is an error.  */
2831 as_bad (_("invalid instruction size selected (%d)"), width
);
/* Handler for the .arm directive.
   NOTE(review): the body lines between the header and the call below
   (likely the switch into ARM mode) are missing from this extract.  */
2836 s_arm (int ignore ATTRIBUTE_UNUSED
)
2839 demand_empty_rest_of_line ();
/* Handler for the .thumb directive.
   NOTE(review): the body lines between the header and the call below
   (likely the switch into Thumb mode) are missing from this extract.  */
2843 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2846 demand_empty_rest_of_line ();
/* Handler for the .code directive: reads an absolute expression and
   selects the instruction width; anything but 16 or 32 is rejected.  */
2850 s_code (int unused ATTRIBUTE_UNUSED
)
2854 temp
= get_absolute_expression ();
2859 opcode_select (temp
);
2863 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
/* Handler for .force_thumb: enter Thumb mode unconditionally, even on
   processors that lack Thumb support (see comment below for why).  */
2868 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2870 /* If we are not already in thumb mode go into it, EVEN if
2871 the target processor does not support thumb instructions.
2872 This is used by gcc/config/arm/lib1funcs.asm for example
2873 to compile interworking support functions even if the
2874 target processor should not support interworking. */
2878 record_alignment (now_seg
, 1);
2881 demand_empty_rest_of_line ();
/* Handler for .thumb_func: flag that the next label names the start of
   a Thumb function, for ARM/Thumb interworking bookkeeping.  */
2885 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2889 /* The following label is the name/address of the start of a Thumb function.
2890 We need to know this for the interworking support. */
2891 label_is_thumb_function_name
= TRUE
;
2894 /* Perform a .set directive, but also mark the alias as
2895 being a thumb function. */
2898 s_thumb_set (int equiv
)
2900 /* XXX the following is a duplicate of the code for s_set() in read.c
2901 We cannot just call that code as we need to get at the symbol that
2908 /* Especial apologies for the random logic:
2909 This just grew, and could be parsed much more simply!
2911 delim
= get_symbol_name (& name
);
2912 end_name
= input_line_pointer
;
2913 (void) restore_line_pointer (delim
);
2915 if (*input_line_pointer
!= ',')
2918 as_bad (_("expected comma after name \"%s\""), name
);
2920 ignore_rest_of_line ();
2924 input_line_pointer
++;
2927 if (name
[0] == '.' && name
[1] == '\0')
2929 /* XXX - this should not happen to .thumb_set. */
2933 if ((symbolP
= symbol_find (name
)) == NULL
2934 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2937 /* When doing symbol listings, play games with dummy fragments living
2938 outside the normal fragment chain to record the file and line info
2940 if (listing
& LISTING_SYMBOLS
)
2942 extern struct list_info_struct
* listing_tail
;
2943 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2945 memset (dummy_frag
, 0, sizeof (fragS
));
2946 dummy_frag
->fr_type
= rs_fill
;
2947 dummy_frag
->line
= listing_tail
;
2948 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2949 dummy_frag
->fr_symbol
= symbolP
;
2953 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2956 /* "set" symbols are local unless otherwise specified. */
2957 SF_SET_LOCAL (symbolP
);
2958 #endif /* OBJ_COFF */
2959 } /* Make a new symbol. */
2961 symbol_table_insert (symbolP
);
2966 && S_IS_DEFINED (symbolP
)
2967 && S_GET_SEGMENT (symbolP
) != reg_section
)
2968 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2970 pseudo_set (symbolP
);
2972 demand_empty_rest_of_line ();
2974 /* XXX Now we come to the Thumb specific bit of code. */
2976 THUMB_SET_FUNC (symbolP
, 1);
2977 ARM_SET_THUMB (symbolP
, 1);
2978 #if defined OBJ_ELF || defined OBJ_COFF
2979 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2983 /* Directives: Mode selection. */
2985 /* .syntax [unified|divided] - choose the new unified syntax
2986 (same for Arm and Thumb encoding, modulo slight differences in what
2987 can be represented) or the old divergent syntax for each mode. */
2989 s_syntax (int unused ATTRIBUTE_UNUSED
)
/* Read the mode keyword following the directive.  */
2993 delim
= get_symbol_name (& name
)
;
/* Keyword comparison is case-insensitive.  */
2995 if (!strcasecmp (name
, "unified"))
2996 unified_syntax
= TRUE
;
2997 else if (!strcasecmp (name
, "divided"))
2998 unified_syntax
= FALSE
;
3001 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3004 (void) restore_line_pointer (delim
);
3005 demand_empty_rest_of_line ();
3008 /* Directives: sectioning and alignment. */
/* Handler for .bss: switch output to the BSS section.  */
3011 s_bss (int ignore ATTRIBUTE_UNUSED
)
3013 /* We don't support putting frags in the BSS segment, we fake it by
3014 marking in_bss, then looking at s_skip for clues. */
3015 subseg_set (bss_section
, 0);
3016 demand_empty_rest_of_line ();
/* Let the ELF backend react to the section change, when available.  */
3018 #ifdef md_elf_section_change_hook
3019 md_elf_section_change_hook ();
/* Handler for .even: align the current location to a 2-byte boundary
   and record that alignment on the section.  */
3024 s_even (int ignore ATTRIBUTE_UNUSED
)
3026 /* Never make frag if expect extra pass. */
3028 frag_align (1, 0, 0);
3030 record_alignment (now_seg
, 1);
3032 demand_empty_rest_of_line ();
3035 /* Directives: CodeComposer Studio. */
3037 /* .ref (for CodeComposer Studio syntax only). */
/* Accepted (and ignored) under -mccs; an error otherwise.  */
3039 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3041 if (codecomposer_syntax
)
3042 ignore_rest_of_line ();
3044 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3047 /* If name is not NULL, then it is used for marking the beginning of a
3048 function, whereas if it is NULL then it means the function end.  */
3050 asmfunc_debug (const char * name
)
3052 static const char * last_name
= NULL
;
3056 gas_assert (last_name
== NULL
);
3059 if (debug_type
== DEBUG_STABS
)
3060 stabs_generate_asm_func (name
, name
);
3064 gas_assert (last_name
!= NULL
);
3066 if (debug_type
== DEBUG_STABS
)
3067 stabs_generate_asm_endfunc (last_name
, last_name
);
3074 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3076 if (codecomposer_syntax
)
3078 switch (asmfunc_state
)
3080 case OUTSIDE_ASMFUNC
:
3081 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3084 case WAITING_ASMFUNC_NAME
:
3085 as_bad (_(".asmfunc repeated."));
3088 case WAITING_ENDASMFUNC
:
3089 as_bad (_(".asmfunc without function."));
3092 demand_empty_rest_of_line ();
3095 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3099 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3101 if (codecomposer_syntax
)
3103 switch (asmfunc_state
)
3105 case OUTSIDE_ASMFUNC
:
3106 as_bad (_(".endasmfunc without a .asmfunc."));
3109 case WAITING_ASMFUNC_NAME
:
3110 as_bad (_(".endasmfunc without function."));
3113 case WAITING_ENDASMFUNC
:
3114 asmfunc_state
= OUTSIDE_ASMFUNC
;
3115 asmfunc_debug (NULL
);
3118 demand_empty_rest_of_line ();
3121 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
/* Handler for the CodeComposer Studio .def directive; valid only when
   -mccs syntax is enabled.  */
3125 s_ccs_def (int name
)
3127 if (codecomposer_syntax
)
3130 as_bad (_(".def pseudo-op only available with -mccs flag."));
3133 /* Directives: Literal pools. */
/* Return the literal pool attached to the current (section, subsection)
   pair, scanning the global list_of_pools; NULL when none exists.  */
3135 static literal_pool
*
3136 find_literal_pool (void)
3138 literal_pool
* pool
;
/* Walk the singly-linked list of pools.  */
3140 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
/* Match on both section and subsection.  */
3142 if (pool
->section
== now_seg
3143 && pool
->sub_section
== now_subseg
)
3150 static literal_pool
*
3151 find_or_make_literal_pool (void)
3153 /* Next literal pool ID number. */
3154 static unsigned int latest_pool_num
= 1;
3155 literal_pool
* pool
;
3157 pool
= find_literal_pool ();
3161 /* Create a new pool. */
3162 pool
= XNEW (literal_pool
);
3166 pool
->next_free_entry
= 0;
3167 pool
->section
= now_seg
;
3168 pool
->sub_section
= now_subseg
;
3169 pool
->next
= list_of_pools
;
3170 pool
->symbol
= NULL
;
3171 pool
->alignment
= 2;
3173 /* Add it to the list. */
3174 list_of_pools
= pool
;
3177 /* New pools, and emptied pools, will have a NULL symbol. */
3178 if (pool
->symbol
== NULL
)
3180 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3181 (valueT
) 0, &zero_address_frag
);
3182 pool
->id
= latest_pool_num
++;
3189 /* Add the literal in the global 'inst'
3190 structure to the relevant literal pool. */
3193 add_to_lit_pool (unsigned int nbytes
)
3195 #define PADDING_SLOT 0x1
3196 #define LIT_ENTRY_SIZE_MASK 0xFF
3197 literal_pool
* pool
;
3198 unsigned int entry
, pool_size
= 0;
3199 bfd_boolean padding_slot_p
= FALSE
;
3205 imm1
= inst
.operands
[1].imm
;
3206 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3207 : inst
.reloc
.exp
.X_unsigned
? 0
3208 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3209 if (target_big_endian
)
3212 imm2
= inst
.operands
[1].imm
;
3216 pool
= find_or_make_literal_pool ();
3218 /* Check if this literal value is already in the pool. */
3219 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3223 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3224 && (inst
.reloc
.exp
.X_op
== O_constant
)
3225 && (pool
->literals
[entry
].X_add_number
3226 == inst
.reloc
.exp
.X_add_number
)
3227 && (pool
->literals
[entry
].X_md
== nbytes
)
3228 && (pool
->literals
[entry
].X_unsigned
3229 == inst
.reloc
.exp
.X_unsigned
))
3232 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3233 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3234 && (pool
->literals
[entry
].X_add_number
3235 == inst
.reloc
.exp
.X_add_number
)
3236 && (pool
->literals
[entry
].X_add_symbol
3237 == inst
.reloc
.exp
.X_add_symbol
)
3238 && (pool
->literals
[entry
].X_op_symbol
3239 == inst
.reloc
.exp
.X_op_symbol
)
3240 && (pool
->literals
[entry
].X_md
== nbytes
))
3243 else if ((nbytes
== 8)
3244 && !(pool_size
& 0x7)
3245 && ((entry
+ 1) != pool
->next_free_entry
)
3246 && (pool
->literals
[entry
].X_op
== O_constant
)
3247 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3248 && (pool
->literals
[entry
].X_unsigned
3249 == inst
.reloc
.exp
.X_unsigned
)
3250 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3251 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3252 && (pool
->literals
[entry
+ 1].X_unsigned
3253 == inst
.reloc
.exp
.X_unsigned
))
3256 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3257 if (padding_slot_p
&& (nbytes
== 4))
3263 /* Do we need to create a new entry? */
3264 if (entry
== pool
->next_free_entry
)
3266 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3268 inst
.error
= _("literal pool overflow");
3274 /* For 8-byte entries, we align to an 8-byte boundary,
3275 and split it into two 4-byte entries, because on 32-bit
3276 host, 8-byte constants are treated as big num, thus
3277 saved in "generic_bignum" which will be overwritten
3278 by later assignments.
3280 We also need to make sure there is enough space for
3283 We also check to make sure the literal operand is a
3285 if (!(inst
.reloc
.exp
.X_op
== O_constant
3286 || inst
.reloc
.exp
.X_op
== O_big
))
3288 inst
.error
= _("invalid type for literal pool");
3291 else if (pool_size
& 0x7)
3293 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3295 inst
.error
= _("literal pool overflow");
3299 pool
->literals
[entry
] = inst
.reloc
.exp
;
3300 pool
->literals
[entry
].X_op
= O_constant
;
3301 pool
->literals
[entry
].X_add_number
= 0;
3302 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3303 pool
->next_free_entry
+= 1;
3306 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3308 inst
.error
= _("literal pool overflow");
3312 pool
->literals
[entry
] = inst
.reloc
.exp
;
3313 pool
->literals
[entry
].X_op
= O_constant
;
3314 pool
->literals
[entry
].X_add_number
= imm1
;
3315 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3316 pool
->literals
[entry
++].X_md
= 4;
3317 pool
->literals
[entry
] = inst
.reloc
.exp
;
3318 pool
->literals
[entry
].X_op
= O_constant
;
3319 pool
->literals
[entry
].X_add_number
= imm2
;
3320 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3321 pool
->literals
[entry
].X_md
= 4;
3322 pool
->alignment
= 3;
3323 pool
->next_free_entry
+= 1;
3327 pool
->literals
[entry
] = inst
.reloc
.exp
;
3328 pool
->literals
[entry
].X_md
= 4;
3332 /* PR ld/12974: Record the location of the first source line to reference
3333 this entry in the literal pool. If it turns out during linking that the
3334 symbol does not exist we will be able to give an accurate line number for
3335 the (first use of the) missing reference. */
3336 if (debug_type
== DEBUG_DWARF2
)
3337 dwarf2_where (pool
->locs
+ entry
);
3339 pool
->next_free_entry
+= 1;
3341 else if (padding_slot_p
)
3343 pool
->literals
[entry
] = inst
.reloc
.exp
;
3344 pool
->literals
[entry
].X_md
= nbytes
;
3347 inst
.reloc
.exp
.X_op
= O_symbol
;
3348 inst
.reloc
.exp
.X_add_number
= pool_size
;
3349 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3355 tc_start_label_without_colon (void)
3357 bfd_boolean ret
= TRUE
;
3359 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3361 const char *label
= input_line_pointer
;
3363 while (!is_end_of_line
[(int) label
[-1]])
3368 as_bad (_("Invalid label '%s'"), label
);
3372 asmfunc_debug (label
);
3374 asmfunc_state
= WAITING_ENDASMFUNC
;
3380 /* Can't use symbol_new here, so have to create a symbol and then at
3381 a later date assign it a value. That's what these functions do. */
3384 symbol_locate (symbolS
* symbolP
,
3385 const char * name
, /* It is copied, the caller can modify. */
3386 segT segment
, /* Segment identifier (SEG_<something>). */
3387 valueT valu
, /* Symbol value. */
3388 fragS
* frag
) /* Associated fragment. */
3391 char * preserved_copy_of_name
;
3393 name_length
= strlen (name
) + 1; /* +1 for \0. */
3394 obstack_grow (¬es
, name
, name_length
);
3395 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3397 #ifdef tc_canonicalize_symbol_name
3398 preserved_copy_of_name
=
3399 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3402 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3404 S_SET_SEGMENT (symbolP
, segment
);
3405 S_SET_VALUE (symbolP
, valu
);
3406 symbol_clear_list_pointers (symbolP
);
3408 symbol_set_frag (symbolP
, frag
);
3410 /* Link to end of symbol chain. */
3412 extern int symbol_table_frozen
;
3414 if (symbol_table_frozen
)
3418 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3420 obj_symbol_new_hook (symbolP
);
3422 #ifdef tc_symbol_new_hook
3423 tc_symbol_new_hook (symbolP
);
3427 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3428 #endif /* DEBUG_SYMS */
3432 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3435 literal_pool
* pool
;
3438 pool
= find_literal_pool ();
3440 || pool
->symbol
== NULL
3441 || pool
->next_free_entry
== 0)
3444 /* Align pool as you have word accesses.
3445 Only make a frag if we have to. */
3447 frag_align (pool
->alignment
, 0, 0);
3449 record_alignment (now_seg
, 2);
3452 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3453 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3455 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3457 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3458 (valueT
) frag_now_fix (), frag_now
);
3459 symbol_table_insert (pool
->symbol
);
3461 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3463 #if defined OBJ_COFF || defined OBJ_ELF
3464 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3467 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3470 if (debug_type
== DEBUG_DWARF2
)
3471 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3473 /* First output the expression in the instruction to the pool. */
3474 emit_expr (&(pool
->literals
[entry
]),
3475 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3478 /* Mark the pool as empty. */
3479 pool
->next_free_entry
= 0;
3480 pool
->symbol
= NULL
;
3484 /* Forward declarations for functions below, in the MD interface
3486 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3487 static valueT
create_unwind_entry (int);
3488 static void start_unwind_section (const segT
, int);
3489 static void add_unwind_opcode (valueT
, int);
3490 static void flush_pending_unwind (void);
3492 /* Directives: Data. */
3495 s_arm_elf_cons (int nbytes
)
3499 #ifdef md_flush_pending_output
3500 md_flush_pending_output ();
3503 if (is_it_end_of_statement ())
3505 demand_empty_rest_of_line ();
3509 #ifdef md_cons_align
3510 md_cons_align (nbytes
);
3513 mapping_state (MAP_DATA
);
3517 char *base
= input_line_pointer
;
3521 if (exp
.X_op
!= O_symbol
)
3522 emit_expr (&exp
, (unsigned int) nbytes
);
3525 char *before_reloc
= input_line_pointer
;
3526 reloc
= parse_reloc (&input_line_pointer
);
3529 as_bad (_("unrecognized relocation suffix"));
3530 ignore_rest_of_line ();
3533 else if (reloc
== BFD_RELOC_UNUSED
)
3534 emit_expr (&exp
, (unsigned int) nbytes
);
3537 reloc_howto_type
*howto
= (reloc_howto_type
*)
3538 bfd_reloc_type_lookup (stdoutput
,
3539 (bfd_reloc_code_real_type
) reloc
);
3540 int size
= bfd_get_reloc_size (howto
);
3542 if (reloc
== BFD_RELOC_ARM_PLT32
)
3544 as_bad (_("(plt) is only valid on branch targets"));
3545 reloc
= BFD_RELOC_UNUSED
;
3550 as_bad (_("%s relocations do not fit in %d bytes"),
3551 howto
->name
, nbytes
);
3554 /* We've parsed an expression stopping at O_symbol.
3555 But there may be more expression left now that we
3556 have parsed the relocation marker. Parse it again.
3557 XXX Surely there is a cleaner way to do this. */
3558 char *p
= input_line_pointer
;
3560 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3562 memcpy (save_buf
, base
, input_line_pointer
- base
);
3563 memmove (base
+ (input_line_pointer
- before_reloc
),
3564 base
, before_reloc
- base
);
3566 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3568 memcpy (base
, save_buf
, p
- base
);
3570 offset
= nbytes
- size
;
3571 p
= frag_more (nbytes
);
3572 memset (p
, 0, nbytes
);
3573 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3574 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3580 while (*input_line_pointer
++ == ',');
3582 /* Put terminator back into stream. */
3583 input_line_pointer
--;
3584 demand_empty_rest_of_line ();
3587 /* Emit an expression containing a 32-bit thumb instruction.
3588 Implementation based on put_thumb32_insn. */
/* Emits the high 16 bits first, then the low 16 bits, as two
   THUMB_SIZE chunks (Thumb-2 halfword ordering).  */
3591 emit_thumb32_expr (expressionS
* exp
)
/* Copy of EXP used to carry the upper halfword.  */
3593 expressionS exp_high
= *exp
;
3595 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3596 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
/* EXP itself is reduced to the low halfword and emitted second.  */
3597 exp
->X_add_number
&= 0xffff;
3598 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3601 /* Guess the instruction size based on the opcode. */
/* Encodings below 0xe800 are 16-bit Thumb; values at or above
   0xe8000000 are full 32-bit encodings.
   NOTE(review): the return statements for each branch are missing from
   this extract.  */
3604 thumb_insn_size (int opcode
)
3606 if ((unsigned int) opcode
< 0xe800u
)
3608 else if ((unsigned int) opcode
>= 0xe8000000u
)
3615 emit_insn (expressionS
*exp
, int nbytes
)
3619 if (exp
->X_op
== O_constant
)
3624 size
= thumb_insn_size (exp
->X_add_number
);
3628 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3630 as_bad (_(".inst.n operand too big. "\
3631 "Use .inst.w instead"));
3636 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3637 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3639 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3641 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3642 emit_thumb32_expr (exp
);
3644 emit_expr (exp
, (unsigned int) size
);
3646 it_fsm_post_encode ();
3650 as_bad (_("cannot determine Thumb instruction size. " \
3651 "Use .inst.n/.inst.w instead"));
3654 as_bad (_("constant expression required"));
3659 /* Like s_arm_elf_cons but do not use md_cons_align and
3660 set the mapping state to MAP_ARM/MAP_THUMB. */
3663 s_arm_elf_inst (int nbytes
)
3665 if (is_it_end_of_statement ())
3667 demand_empty_rest_of_line ();
3671 /* Calling mapping_state () here will not change ARM/THUMB,
3672 but will ensure not to be in DATA state. */
3675 mapping_state (MAP_THUMB
);
3680 as_bad (_("width suffixes are invalid in ARM mode"));
3681 ignore_rest_of_line ();
3687 mapping_state (MAP_ARM
);
3696 if (! emit_insn (& exp
, nbytes
))
3698 ignore_rest_of_line ();
3702 while (*input_line_pointer
++ == ',');
3704 /* Put terminator back into stream. */
3705 input_line_pointer
--;
3706 demand_empty_rest_of_line ();
3709 /* Parse a .rel31 directive. */
3712 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3719 if (*input_line_pointer
== '1')
3720 highbit
= 0x80000000;
3721 else if (*input_line_pointer
!= '0')
3722 as_bad (_("expected 0 or 1"));
3724 input_line_pointer
++;
3725 if (*input_line_pointer
!= ',')
3726 as_bad (_("missing comma"));
3727 input_line_pointer
++;
3729 #ifdef md_flush_pending_output
3730 md_flush_pending_output ();
3733 #ifdef md_cons_align
3737 mapping_state (MAP_DATA
);
3742 md_number_to_chars (p
, highbit
, 4);
3743 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3744 BFD_RELOC_ARM_PREL31
);
3746 demand_empty_rest_of_line ();
3749 /* Directives: AEABI stack-unwind tables. */
3751 /* Parse an unwind_fnstart directive. Simply records the current location. */
3754 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3756 demand_empty_rest_of_line ();
3757 if (unwind
.proc_start
)
3759 as_bad (_("duplicate .fnstart directive"));
3763 /* Mark the start of the function. */
3764 unwind
.proc_start
= expr_build_dot ();
3766 /* Reset the rest of the unwind info. */
3767 unwind
.opcode_count
= 0;
3768 unwind
.table_entry
= NULL
;
3769 unwind
.personality_routine
= NULL
;
3770 unwind
.personality_index
= -1;
3771 unwind
.frame_size
= 0;
3772 unwind
.fp_offset
= 0;
3773 unwind
.fp_reg
= REG_SP
;
3775 unwind
.sp_restored
= 0;
3779 /* Parse a handlerdata directive. Creates the exception handling table entry
3780 for the function. */
3783 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3785 demand_empty_rest_of_line ();
/* .handlerdata is only meaningful inside a .fnstart/.fnend pair.  */
3786 if (!unwind
.proc_start
)
3787 as_bad (MISSING_FNSTART
);
/* Only one table entry per function is allowed.  */
3789 if (unwind
.table_entry
)
3790 as_bad (_("duplicate .handlerdata directive"));
3792 create_unwind_entry (1);
3795 /* Parse an unwind_fnend directive. Generates the index table entry. */
3798 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3803 unsigned int marked_pr_dependency
;
3805 demand_empty_rest_of_line ();
3807 if (!unwind
.proc_start
)
3809 as_bad (_(".fnend directive without .fnstart"));
3813 /* Add eh table entry. */
3814 if (unwind
.table_entry
== NULL
)
3815 val
= create_unwind_entry (0);
3819 /* Add index table entry. This is two words. */
3820 start_unwind_section (unwind
.saved_seg
, 1);
3821 frag_align (2, 0, 0);
3822 record_alignment (now_seg
, 2);
3824 ptr
= frag_more (8);
3826 where
= frag_now_fix () - 8;
3828 /* Self relative offset of the function start. */
3829 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3830 BFD_RELOC_ARM_PREL31
);
3832 /* Indicate dependency on EHABI-defined personality routines to the
3833 linker, if it hasn't been done already. */
3834 marked_pr_dependency
3835 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3836 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3837 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3839 static const char *const name
[] =
3841 "__aeabi_unwind_cpp_pr0",
3842 "__aeabi_unwind_cpp_pr1",
3843 "__aeabi_unwind_cpp_pr2"
3845 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3846 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3847 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3848 |= 1 << unwind
.personality_index
;
3852 /* Inline exception table entry. */
3853 md_number_to_chars (ptr
+ 4, val
, 4);
3855 /* Self relative offset of the table entry. */
3856 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3857 BFD_RELOC_ARM_PREL31
);
3859 /* Restore the original section. */
3860 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3862 unwind
.proc_start
= NULL
;
3866 /* Parse an unwind_cantunwind directive. */
/* Marks the current function as not unwindable.  Incompatible with an
   explicit personality routine/index.  */
3869 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3871 demand_empty_rest_of_line ();
3872 if (!unwind
.proc_start
)
3873 as_bad (MISSING_FNSTART
);
3875 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3876 as_bad (_("personality routine specified for cantunwind frame"));
/* -2 encodes the "can't unwind" state in later table generation.  */
3878 unwind
.personality_index
= -2;
3882 /* Parse a personalityindex directive. */
3885 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3889 if (!unwind
.proc_start
)
3890 as_bad (MISSING_FNSTART
);
3892 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3893 as_bad (_("duplicate .personalityindex directive"));
3897 if (exp
.X_op
!= O_constant
3898 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3900 as_bad (_("bad personality routine number"));
3901 ignore_rest_of_line ();
3905 unwind
.personality_index
= exp
.X_add_number
;
3907 demand_empty_rest_of_line ();
3911 /* Parse a personality directive. */
3914 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3918 if (!unwind
.proc_start
)
3919 as_bad (MISSING_FNSTART
);
3921 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3922 as_bad (_("duplicate .personality directive"));
3924 c
= get_symbol_name (& name
);
3925 p
= input_line_pointer
;
3927 ++ input_line_pointer
;
3928 unwind
.personality_routine
= symbol_find_or_make (name
);
3930 demand_empty_rest_of_line ();
3934 /* Parse a directive saving core registers. */
3937 s_arm_unwind_save_core (void)
3943 range
= parse_reg_list (&input_line_pointer
);
3946 as_bad (_("expected register list"));
3947 ignore_rest_of_line ();
3951 demand_empty_rest_of_line ();
3953 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3954 into .unwind_save {..., sp...}. We aren't bothered about the value of
3955 ip because it is clobbered by calls. */
3956 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3957 && (range
& 0x3000) == 0x1000)
3959 unwind
.opcode_count
--;
3960 unwind
.sp_restored
= 0;
3961 range
= (range
| 0x2000) & ~0x1000;
3962 unwind
.pending_offset
= 0;
3968 /* See if we can use the short opcodes. These pop a block of up to 8
3969 registers starting with r4, plus maybe r14. */
3970 for (n
= 0; n
< 8; n
++)
3972 /* Break at the first non-saved register. */
3973 if ((range
& (1 << (n
+ 4))) == 0)
3976 /* See if there are any other bits set. */
3977 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3979 /* Use the long form. */
3980 op
= 0x8000 | ((range
>> 4) & 0xfff);
3981 add_unwind_opcode (op
, 2);
3985 /* Use the short form. */
3987 op
= 0xa8; /* Pop r14. */
3989 op
= 0xa0; /* Do not pop r14. */
3991 add_unwind_opcode (op
, 1);
3998 op
= 0xb100 | (range
& 0xf);
3999 add_unwind_opcode (op
, 2);
4002 /* Record the number of bytes pushed. */
4003 for (n
= 0; n
< 16; n
++)
4005 if (range
& (1 << n
))
4006 unwind
.frame_size
+= 4;
4011 /* Parse a directive saving FPA registers. */
4014 s_arm_unwind_save_fpa (int reg
)
4020 /* Get Number of registers to transfer. */
4021 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4024 exp
.X_op
= O_illegal
;
4026 if (exp
.X_op
!= O_constant
)
4028 as_bad (_("expected , <constant>"));
4029 ignore_rest_of_line ();
4033 num_regs
= exp
.X_add_number
;
4035 if (num_regs
< 1 || num_regs
> 4)
4037 as_bad (_("number of registers must be in the range [1:4]"));
4038 ignore_rest_of_line ();
4042 demand_empty_rest_of_line ();
4047 op
= 0xb4 | (num_regs
- 1);
4048 add_unwind_opcode (op
, 1);
4053 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4054 add_unwind_opcode (op
, 2);
4056 unwind
.frame_size
+= num_regs
* 12;
4060 /* Parse a directive saving VFP registers for ARMv6 and above. */
4063 s_arm_unwind_save_vfp_armv6 (void)
4068 int num_vfpv3_regs
= 0;
4069 int num_regs_below_16
;
4071 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4074 as_bad (_("expected register list"));
4075 ignore_rest_of_line ();
4079 demand_empty_rest_of_line ();
4081 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4082 than FSTMX/FLDMX-style ones). */
4084 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4086 num_vfpv3_regs
= count
;
4087 else if (start
+ count
> 16)
4088 num_vfpv3_regs
= start
+ count
- 16;
4090 if (num_vfpv3_regs
> 0)
4092 int start_offset
= start
> 16 ? start
- 16 : 0;
4093 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4094 add_unwind_opcode (op
, 2);
4097 /* Generate opcode for registers numbered in the range 0 .. 15. */
4098 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4099 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4100 if (num_regs_below_16
> 0)
4102 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4103 add_unwind_opcode (op
, 2);
4106 unwind
.frame_size
+= count
* 8;
4110 /* Parse a directive saving VFP registers for pre-ARMv6. */
4113 s_arm_unwind_save_vfp (void)
/* Parse the D-register list after the directive.
   NOTE(review): the "®" below is a mis-encoded "&reg" (HTML-entity
   corruption in this copy) — restore from upstream sources.  */
4119 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4122 as_bad (_("expected register list"));
4123 ignore_rest_of_line ();
4127 demand_empty_rest_of_line ();
/* Short-form opcode (0xb8..) for runs starting at the base register.  */
4132 op
= 0xb8 | (count
- 1);
4133 add_unwind_opcode (op
, 1);
/* Long-form opcode (0xb3xx) encoding start register and count.  */
4138 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4139 add_unwind_opcode (op
, 2);
/* 8 bytes per D register plus 4 for the FSTMX format word.  */
4141 unwind
.frame_size
+= count
* 8 + 4;
4145 /* Parse a directive saving iWMMXt data registers. */
4148 s_arm_unwind_save_mmxwr (void)
4156 if (*input_line_pointer
== '{')
4157 input_line_pointer
++;
4161 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4165 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4170 as_tsktsk (_("register list not in ascending order"));
4173 if (*input_line_pointer
== '-')
4175 input_line_pointer
++;
4176 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4179 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4182 else if (reg
>= hi_reg
)
4184 as_bad (_("bad register range"));
4187 for (; reg
< hi_reg
; reg
++)
4191 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4193 skip_past_char (&input_line_pointer
, '}');
4195 demand_empty_rest_of_line ();
4197 /* Generate any deferred opcodes because we're going to be looking at
4199 flush_pending_unwind ();
4201 for (i
= 0; i
< 16; i
++)
4203 if (mask
& (1 << i
))
4204 unwind
.frame_size
+= 8;
4207 /* Attempt to combine with a previous opcode. We do this because gcc
4208 likes to output separate unwind directives for a single block of
4210 if (unwind
.opcode_count
> 0)
4212 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4213 if ((i
& 0xf8) == 0xc0)
4216 /* Only merge if the blocks are contiguous. */
4219 if ((mask
& 0xfe00) == (1 << 9))
4221 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4222 unwind
.opcode_count
--;
4225 else if (i
== 6 && unwind
.opcode_count
>= 2)
4227 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4231 op
= 0xffff << (reg
- 1);
4233 && ((mask
& op
) == (1u << (reg
- 1))))
4235 op
= (1 << (reg
+ i
+ 1)) - 1;
4236 op
&= ~((1 << reg
) - 1);
4238 unwind
.opcode_count
-= 2;
4245 /* We want to generate opcodes in the order the registers have been
4246 saved, ie. descending order. */
4247 for (reg
= 15; reg
>= -1; reg
--)
4249 /* Save registers in blocks. */
4251 || !(mask
& (1 << reg
)))
4253 /* We found an unsaved reg. Generate opcodes to save the
4260 op
= 0xc0 | (hi_reg
- 10);
4261 add_unwind_opcode (op
, 1);
4266 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4267 add_unwind_opcode (op
, 2);
4276 ignore_rest_of_line ();
4280 s_arm_unwind_save_mmxwcg (void)
4287 if (*input_line_pointer
== '{')
4288 input_line_pointer
++;
4290 skip_whitespace (input_line_pointer
);
4294 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4298 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4304 as_tsktsk (_("register list not in ascending order"));
4307 if (*input_line_pointer
== '-')
4309 input_line_pointer
++;
4310 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4313 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4316 else if (reg
>= hi_reg
)
4318 as_bad (_("bad register range"));
4321 for (; reg
< hi_reg
; reg
++)
4325 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4327 skip_past_char (&input_line_pointer
, '}');
4329 demand_empty_rest_of_line ();
4331 /* Generate any deferred opcodes because we're going to be looking at
4333 flush_pending_unwind ();
4335 for (reg
= 0; reg
< 16; reg
++)
4337 if (mask
& (1 << reg
))
4338 unwind
.frame_size
+= 4;
4341 add_unwind_opcode (op
, 2);
4344 ignore_rest_of_line ();
4348 /* Parse an unwind_save directive.
4349 If the argument is non-zero, this is a .vsave directive. */
4352 s_arm_unwind_save (int arch_v6
)
4355 struct reg_entry
*reg
;
4356 bfd_boolean had_brace
= FALSE
;
4358 if (!unwind
.proc_start
)
4359 as_bad (MISSING_FNSTART
);
4361 /* Figure out what sort of save we have. */
4362 peek
= input_line_pointer
;
4370 reg
= arm_reg_parse_multi (&peek
);
4374 as_bad (_("register expected"));
4375 ignore_rest_of_line ();
4384 as_bad (_("FPA .unwind_save does not take a register list"));
4385 ignore_rest_of_line ();
4388 input_line_pointer
= peek
;
4389 s_arm_unwind_save_fpa (reg
->number
);
4393 s_arm_unwind_save_core ();
4398 s_arm_unwind_save_vfp_armv6 ();
4400 s_arm_unwind_save_vfp ();
4403 case REG_TYPE_MMXWR
:
4404 s_arm_unwind_save_mmxwr ();
4407 case REG_TYPE_MMXWCG
:
4408 s_arm_unwind_save_mmxwcg ();
4412 as_bad (_(".unwind_save does not support this kind of register"));
4413 ignore_rest_of_line ();
4418 /* Parse an unwind_movsp directive. */
4421 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4427 if (!unwind
.proc_start
)
4428 as_bad (MISSING_FNSTART
);
4430 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4433 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4434 ignore_rest_of_line ();
4438 /* Optional constant. */
4439 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4441 if (immediate_for_directive (&offset
) == FAIL
)
4447 demand_empty_rest_of_line ();
4449 if (reg
== REG_SP
|| reg
== REG_PC
)
4451 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4455 if (unwind
.fp_reg
!= REG_SP
)
4456 as_bad (_("unexpected .unwind_movsp directive"));
4458 /* Generate opcode to restore the value. */
4460 add_unwind_opcode (op
, 1);
4462 /* Record the information for later. */
4463 unwind
.fp_reg
= reg
;
4464 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4465 unwind
.sp_restored
= 1;
4468 /* Parse an unwind_pad directive. */
4471 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4475 if (!unwind
.proc_start
)
4476 as_bad (MISSING_FNSTART
);
4478 if (immediate_for_directive (&offset
) == FAIL
)
4483 as_bad (_("stack increment must be multiple of 4"));
4484 ignore_rest_of_line ();
4488 /* Don't generate any opcodes, just record the details for later. */
4489 unwind
.frame_size
+= offset
;
4490 unwind
.pending_offset
+= offset
;
4492 demand_empty_rest_of_line ();
4495 /* Parse an unwind_setfp directive. */
4498 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4504 if (!unwind
.proc_start
)
4505 as_bad (MISSING_FNSTART
);
4507 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4508 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4511 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4513 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4515 as_bad (_("expected <reg>, <reg>"));
4516 ignore_rest_of_line ();
4520 /* Optional constant. */
4521 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4523 if (immediate_for_directive (&offset
) == FAIL
)
4529 demand_empty_rest_of_line ();
4531 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4533 as_bad (_("register must be either sp or set by a previous"
4534 "unwind_movsp directive"));
4538 /* Don't generate any opcodes, just record the information for later. */
4539 unwind
.fp_reg
= fp_reg
;
4541 if (sp_reg
== REG_SP
)
4542 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4544 unwind
.fp_offset
-= offset
;
4547 /* Parse an unwind_raw directive. */
4550 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4553 /* This is an arbitrary limit. */
4554 unsigned char op
[16];
4557 if (!unwind
.proc_start
)
4558 as_bad (MISSING_FNSTART
);
4561 if (exp
.X_op
== O_constant
4562 && skip_past_comma (&input_line_pointer
) != FAIL
)
4564 unwind
.frame_size
+= exp
.X_add_number
;
4568 exp
.X_op
= O_illegal
;
4570 if (exp
.X_op
!= O_constant
)
4572 as_bad (_("expected <offset>, <opcode>"));
4573 ignore_rest_of_line ();
4579 /* Parse the opcode. */
4584 as_bad (_("unwind opcode too long"));
4585 ignore_rest_of_line ();
4587 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4589 as_bad (_("invalid unwind opcode"));
4590 ignore_rest_of_line ();
4593 op
[count
++] = exp
.X_add_number
;
4595 /* Parse the next byte. */
4596 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4602 /* Add the opcode bytes in reverse order. */
4604 add_unwind_opcode (op
[count
], 1);
4606 demand_empty_rest_of_line ();
4610 /* Parse a .eabi_attribute directive. */
4613 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4615 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4617 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4618 attributes_set_explicitly
[tag
] = 1;
4621 /* Emit a tls fix for the symbol. */
4624 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4628 #ifdef md_flush_pending_output
4629 md_flush_pending_output ();
4632 #ifdef md_cons_align
4636 /* Since we're just labelling the code, there's no need to define a
4639 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4640 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4641 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4642 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4644 #endif /* OBJ_ELF */
4646 static void s_arm_arch (int);
4647 static void s_arm_object_arch (int);
4648 static void s_arm_cpu (int);
4649 static void s_arm_fpu (int);
4650 static void s_arm_arch_extension (int);
4655 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4662 if (exp
.X_op
== O_symbol
)
4663 exp
.X_op
= O_secrel
;
4665 emit_expr (&exp
, 4);
4667 while (*input_line_pointer
++ == ',');
4669 input_line_pointer
--;
4670 demand_empty_rest_of_line ();
4674 /* This table describes all the machine specific pseudo-ops the assembler
4675 has to support. The fields are:
4676 pseudo-op name without dot
4677 function to call to execute this pseudo-op
4678 Integer arg to pass to the function. */
4680 const pseudo_typeS md_pseudo_table
[] =
4682 /* Never called because '.req' does not start a line. */
4683 { "req", s_req
, 0 },
4684 /* Following two are likewise never called. */
4687 { "unreq", s_unreq
, 0 },
4688 { "bss", s_bss
, 0 },
4689 { "align", s_align_ptwo
, 2 },
4690 { "arm", s_arm
, 0 },
4691 { "thumb", s_thumb
, 0 },
4692 { "code", s_code
, 0 },
4693 { "force_thumb", s_force_thumb
, 0 },
4694 { "thumb_func", s_thumb_func
, 0 },
4695 { "thumb_set", s_thumb_set
, 0 },
4696 { "even", s_even
, 0 },
4697 { "ltorg", s_ltorg
, 0 },
4698 { "pool", s_ltorg
, 0 },
4699 { "syntax", s_syntax
, 0 },
4700 { "cpu", s_arm_cpu
, 0 },
4701 { "arch", s_arm_arch
, 0 },
4702 { "object_arch", s_arm_object_arch
, 0 },
4703 { "fpu", s_arm_fpu
, 0 },
4704 { "arch_extension", s_arm_arch_extension
, 0 },
4706 { "word", s_arm_elf_cons
, 4 },
4707 { "long", s_arm_elf_cons
, 4 },
4708 { "inst.n", s_arm_elf_inst
, 2 },
4709 { "inst.w", s_arm_elf_inst
, 4 },
4710 { "inst", s_arm_elf_inst
, 0 },
4711 { "rel31", s_arm_rel31
, 0 },
4712 { "fnstart", s_arm_unwind_fnstart
, 0 },
4713 { "fnend", s_arm_unwind_fnend
, 0 },
4714 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4715 { "personality", s_arm_unwind_personality
, 0 },
4716 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4717 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4718 { "save", s_arm_unwind_save
, 0 },
4719 { "vsave", s_arm_unwind_save
, 1 },
4720 { "movsp", s_arm_unwind_movsp
, 0 },
4721 { "pad", s_arm_unwind_pad
, 0 },
4722 { "setfp", s_arm_unwind_setfp
, 0 },
4723 { "unwind_raw", s_arm_unwind_raw
, 0 },
4724 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4725 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4729 /* These are used for dwarf. */
4733 /* These are used for dwarf2. */
4734 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4735 { "loc", dwarf2_directive_loc
, 0 },
4736 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4738 { "extend", float_cons
, 'x' },
4739 { "ldouble", float_cons
, 'x' },
4740 { "packed", float_cons
, 'p' },
4742 {"secrel32", pe_directive_secrel
, 0},
4745 /* These are for compatibility with CodeComposer Studio. */
4746 {"ref", s_ccs_ref
, 0},
4747 {"def", s_ccs_def
, 0},
4748 {"asmfunc", s_ccs_asmfunc
, 0},
4749 {"endasmfunc", s_ccs_endasmfunc
, 0},
4754 /* Parser functions used exclusively in instruction operands. */
4756 /* Generic immediate-value read function for use in insn parsing.
4757 STR points to the beginning of the immediate (the leading #);
4758 VAL receives the value; if the value is outside [MIN, MAX]
4759 issue an error. PREFIX_OPT is true if the immediate prefix is
4763 parse_immediate (char **str
, int *val
, int min
, int max
,
4764 bfd_boolean prefix_opt
)
4767 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4768 if (exp
.X_op
!= O_constant
)
4770 inst
.error
= _("constant expression required");
4774 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4776 inst
.error
= _("immediate value out of range");
4780 *val
= exp
.X_add_number
;
4784 /* Less-generic immediate-value read function with the possibility of loading a
4785 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4786 instructions. Puts the result directly in inst.operands[i]. */
4789 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4790 bfd_boolean allow_symbol_p
)
4793 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4796 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4798 if (exp_p
->X_op
== O_constant
)
4800 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4801 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4802 O_constant. We have to be careful not to break compilation for
4803 32-bit X_add_number, though. */
4804 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4806 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4807 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4809 inst
.operands
[i
].regisimm
= 1;
4812 else if (exp_p
->X_op
== O_big
4813 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4815 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4817 /* Bignums have their least significant bits in
4818 generic_bignum[0]. Make sure we put 32 bits in imm and
4819 32 bits in reg, in a (hopefully) portable way. */
4820 gas_assert (parts
!= 0);
4822 /* Make sure that the number is not too big.
4823 PR 11972: Bignums can now be sign-extended to the
4824 size of a .octa so check that the out of range bits
4825 are all zero or all one. */
4826 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4828 LITTLENUM_TYPE m
= -1;
4830 if (generic_bignum
[parts
* 2] != 0
4831 && generic_bignum
[parts
* 2] != m
)
4834 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4835 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4839 inst
.operands
[i
].imm
= 0;
4840 for (j
= 0; j
< parts
; j
++, idx
++)
4841 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4842 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4843 inst
.operands
[i
].reg
= 0;
4844 for (j
= 0; j
< parts
; j
++, idx
++)
4845 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4846 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4847 inst
.operands
[i
].regisimm
= 1;
4849 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4857 /* Returns the pseudo-register number of an FPA immediate constant,
4858 or FAIL if there isn't a valid constant here. */
4861 parse_fpa_immediate (char ** str
)
4863 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4869 /* First try and match exact strings, this is to guarantee
4870 that some formats will work even for cross assembly. */
4872 for (i
= 0; fp_const
[i
]; i
++)
4874 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4878 *str
+= strlen (fp_const
[i
]);
4879 if (is_end_of_line
[(unsigned char) **str
])
4885 /* Just because we didn't get a match doesn't mean that the constant
4886 isn't valid, just that it is in a format that we don't
4887 automatically recognize. Try parsing it with the standard
4888 expression routines. */
4890 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4892 /* Look for a raw floating point number. */
4893 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4894 && is_end_of_line
[(unsigned char) *save_in
])
4896 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4898 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4900 if (words
[j
] != fp_values
[i
][j
])
4904 if (j
== MAX_LITTLENUMS
)
4912 /* Try and parse a more complex expression, this will probably fail
4913 unless the code uses a floating point prefix (eg "0f"). */
4914 save_in
= input_line_pointer
;
4915 input_line_pointer
= *str
;
4916 if (expression (&exp
) == absolute_section
4917 && exp
.X_op
== O_big
4918 && exp
.X_add_number
< 0)
4920 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4922 #define X_PRECISION 5
4923 #define E_PRECISION 15L
4924 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
4926 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4928 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4930 if (words
[j
] != fp_values
[i
][j
])
4934 if (j
== MAX_LITTLENUMS
)
4936 *str
= input_line_pointer
;
4937 input_line_pointer
= save_in
;
4944 *str
= input_line_pointer
;
4945 input_line_pointer
= save_in
;
4946 inst
.error
= _("invalid FPA immediate expression");
4950 /* Returns 1 if a number has "quarter-precision" float format
4951 0baBbbbbbc defgh000 00000000 00000000. */
4954 is_quarter_float (unsigned imm
)
4956 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
4957 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
4961 /* Detect the presence of a floating point or integer zero constant,
4965 parse_ifimm_zero (char **in
)
4969 if (!is_immediate_prefix (**in
))
4974 /* Accept #0x0 as a synonym for #0. */
4975 if (strncmp (*in
, "0x", 2) == 0)
4978 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
4983 error_code
= atof_generic (in
, ".", EXP_CHARS
,
4984 &generic_floating_point_number
);
4987 && generic_floating_point_number
.sign
== '+'
4988 && (generic_floating_point_number
.low
4989 > generic_floating_point_number
.leader
))
4995 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4996 0baBbbbbbc defgh000 00000000 00000000.
4997 The zero and minus-zero cases need special handling, since they can't be
4998 encoded in the "quarter-precision" float format, but can nonetheless be
4999 loaded as integer constants. */
5002 parse_qfloat_immediate (char **ccp
, int *immed
)
5006 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5007 int found_fpchar
= 0;
5009 skip_past_char (&str
, '#');
5011 /* We must not accidentally parse an integer as a floating-point number. Make
5012 sure that the value we parse is not an integer by checking for special
5013 characters '.' or 'e'.
5014 FIXME: This is a horrible hack, but doing better is tricky because type
5015 information isn't in a very usable state at parse time. */
5017 skip_whitespace (fpnum
);
5019 if (strncmp (fpnum
, "0x", 2) == 0)
5023 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5024 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5034 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5036 unsigned fpword
= 0;
5039 /* Our FP word must be 32 bits (single-precision FP). */
5040 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5042 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5046 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5059 /* Shift operands. */
5062 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5065 struct asm_shift_name
5068 enum shift_kind kind
;
5071 /* Third argument to parse_shift. */
5072 enum parse_shift_mode
5074 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5075 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5076 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5077 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5078 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5081 /* Parse a <shift> specifier on an ARM data processing instruction.
5082 This has three forms:
5084 (LSL|LSR|ASL|ASR|ROR) Rs
5085 (LSL|LSR|ASL|ASR|ROR) #imm
5088 Note that ASL is assimilated to LSL in the instruction encoding, and
5089 RRX to ROR #0 (which cannot be written as such). */
5092 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5094 const struct asm_shift_name
*shift_name
;
5095 enum shift_kind shift
;
5100 for (p
= *str
; ISALPHA (*p
); p
++)
5105 inst
.error
= _("shift expression expected");
5109 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5112 if (shift_name
== NULL
)
5114 inst
.error
= _("shift expression expected");
5118 shift
= shift_name
->kind
;
5122 case NO_SHIFT_RESTRICT
:
5123 case SHIFT_IMMEDIATE
: break;
5125 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5126 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5128 inst
.error
= _("'LSL' or 'ASR' required");
5133 case SHIFT_LSL_IMMEDIATE
:
5134 if (shift
!= SHIFT_LSL
)
5136 inst
.error
= _("'LSL' required");
5141 case SHIFT_ASR_IMMEDIATE
:
5142 if (shift
!= SHIFT_ASR
)
5144 inst
.error
= _("'ASR' required");
5152 if (shift
!= SHIFT_RRX
)
5154 /* Whitespace can appear here if the next thing is a bare digit. */
5155 skip_whitespace (p
);
5157 if (mode
== NO_SHIFT_RESTRICT
5158 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5160 inst
.operands
[i
].imm
= reg
;
5161 inst
.operands
[i
].immisreg
= 1;
5163 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5166 inst
.operands
[i
].shift_kind
= shift
;
5167 inst
.operands
[i
].shifted
= 1;
5172 /* Parse a <shifter_operand> for an ARM data processing instruction:
5175 #<immediate>, <rotate>
5179 where <shift> is defined by parse_shift above, and <rotate> is a
5180 multiple of 2 between 0 and 30. Validation of immediate operands
5181 is deferred to md_apply_fix. */
5184 parse_shifter_operand (char **str
, int i
)
5189 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5191 inst
.operands
[i
].reg
= value
;
5192 inst
.operands
[i
].isreg
= 1;
5194 /* parse_shift will override this if appropriate */
5195 inst
.reloc
.exp
.X_op
= O_constant
;
5196 inst
.reloc
.exp
.X_add_number
= 0;
5198 if (skip_past_comma (str
) == FAIL
)
5201 /* Shift operation on register. */
5202 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5205 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
5208 if (skip_past_comma (str
) == SUCCESS
)
5210 /* #x, y -- ie explicit rotation by Y. */
5211 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5214 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
5216 inst
.error
= _("constant expression expected");
5220 value
= exp
.X_add_number
;
5221 if (value
< 0 || value
> 30 || value
% 2 != 0)
5223 inst
.error
= _("invalid rotation");
5226 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
5228 inst
.error
= _("invalid constant");
5232 /* Encode as specified. */
5233 inst
.operands
[i
].imm
= inst
.reloc
.exp
.X_add_number
| value
<< 7;
5237 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5238 inst
.reloc
.pc_rel
= 0;
5242 /* Group relocation information. Each entry in the table contains the
5243 textual name of the relocation as may appear in assembler source
5244 and must end with a colon.
5245 Along with this textual name are the relocation codes to be used if
5246 the corresponding instruction is an ALU instruction (ADD or SUB only),
5247 an LDR, an LDRS, or an LDC. */
5249 struct group_reloc_table_entry
5260 /* Varieties of non-ALU group relocation. */
5267 static struct group_reloc_table_entry group_reloc_table
[] =
5268 { /* Program counter relative: */
5270 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5275 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5276 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5277 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5278 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5280 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5285 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5286 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5287 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5288 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5290 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5291 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5292 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5293 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5294 /* Section base relative */
5296 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5301 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5302 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5303 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5304 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5306 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5311 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5312 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5313 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5314 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5316 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5317 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5318 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5319 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5320 /* Absolute thumb alu relocations. */
5322 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5327 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5332 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5337 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5342 /* Given the address of a pointer pointing to the textual name of a group
5343 relocation as may appear in assembler source, attempt to find its details
5344 in group_reloc_table. The pointer will be updated to the character after
5345 the trailing colon. On failure, FAIL will be returned; SUCCESS
5346 otherwise. On success, *entry will be updated to point at the relevant
5347 group_reloc_table entry. */
5350 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5353 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5355 int length
= strlen (group_reloc_table
[i
].name
);
5357 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5358 && (*str
)[length
] == ':')
5360 *out
= &group_reloc_table
[i
];
5361 *str
+= (length
+ 1);
5369 /* Parse a <shifter_operand> for an ARM data processing instruction
5370 (as for parse_shifter_operand) where group relocations are allowed:
5373 #<immediate>, <rotate>
5374 #:<group_reloc>:<expression>
5378 where <group_reloc> is one of the strings defined in group_reloc_table.
5379 The hashes are optional.
5381 Everything else is as for parse_shifter_operand. */
5383 static parse_operand_result
5384 parse_shifter_operand_group_reloc (char **str
, int i
)
5386 /* Determine if we have the sequence of characters #: or just :
5387 coming next. If we do, then we check for a group relocation.
5388 If we don't, punt the whole lot to parse_shifter_operand. */
5390 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5391 || (*str
)[0] == ':')
5393 struct group_reloc_table_entry
*entry
;
5395 if ((*str
)[0] == '#')
5400 /* Try to parse a group relocation. Anything else is an error. */
5401 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5403 inst
.error
= _("unknown group relocation");
5404 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5407 /* We now have the group relocation table entry corresponding to
5408 the name in the assembler source. Next, we parse the expression. */
5409 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
5410 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5412 /* Record the relocation type (always the ALU variant here). */
5413 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5414 gas_assert (inst
.reloc
.type
!= 0);
5416 return PARSE_OPERAND_SUCCESS
;
5419 return parse_shifter_operand (str
, i
) == SUCCESS
5420 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5422 /* Never reached. */
5425 /* Parse a Neon alignment expression. Information is written to
5426 inst.operands[i]. We assume the initial ':' has been skipped.
5428 align .imm = align << 8, .immisalign=1, .preind=0 */
5429 static parse_operand_result
5430 parse_neon_alignment (char **str
, int i
)
5435 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5437 if (exp
.X_op
!= O_constant
)
5439 inst
.error
= _("alignment must be constant");
5440 return PARSE_OPERAND_FAIL
;
5443 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5444 inst
.operands
[i
].immisalign
= 1;
5445 /* Alignments are not pre-indexes. */
5446 inst
.operands
[i
].preind
= 0;
5449 return PARSE_OPERAND_SUCCESS
;
5452 /* Parse all forms of an ARM address expression. Information is written
5453 to inst.operands[i] and/or inst.reloc.
5455 Preindexed addressing (.preind=1):
5457 [Rn, #offset] .reg=Rn .reloc.exp=offset
5458 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5459 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5460 .shift_kind=shift .reloc.exp=shift_imm
5462 These three may have a trailing ! which causes .writeback to be set also.
5464 Postindexed addressing (.postind=1, .writeback=1):
5466 [Rn], #offset .reg=Rn .reloc.exp=offset
5467 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5468 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5469 .shift_kind=shift .reloc.exp=shift_imm
5471 Unindexed addressing (.preind=0, .postind=0):
5473 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5477 [Rn]{!} shorthand for [Rn,#0]{!}
5478 =immediate .isreg=0 .reloc.exp=immediate
5479 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5481 It is the caller's responsibility to check for addressing modes not
5482 supported by the instruction, and to set inst.reloc.type. */
5484 static parse_operand_result
5485 parse_address_main (char **str
, int i
, int group_relocations
,
5486 group_reloc_type group_type
)
5491 if (skip_past_char (&p
, '[') == FAIL
)
5493 if (skip_past_char (&p
, '=') == FAIL
)
5495 /* Bare address - translate to PC-relative offset. */
5496 inst
.reloc
.pc_rel
= 1;
5497 inst
.operands
[i
].reg
= REG_PC
;
5498 inst
.operands
[i
].isreg
= 1;
5499 inst
.operands
[i
].preind
= 1;
5501 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_OPT_PREFIX_BIG
))
5502 return PARSE_OPERAND_FAIL
;
5504 else if (parse_big_immediate (&p
, i
, &inst
.reloc
.exp
,
5505 /*allow_symbol_p=*/TRUE
))
5506 return PARSE_OPERAND_FAIL
;
5509 return PARSE_OPERAND_SUCCESS
;
5512 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5513 skip_whitespace (p
);
5515 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5517 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5518 return PARSE_OPERAND_FAIL
;
5520 inst
.operands
[i
].reg
= reg
;
5521 inst
.operands
[i
].isreg
= 1;
5523 if (skip_past_comma (&p
) == SUCCESS
)
5525 inst
.operands
[i
].preind
= 1;
5528 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5530 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5532 inst
.operands
[i
].imm
= reg
;
5533 inst
.operands
[i
].immisreg
= 1;
5535 if (skip_past_comma (&p
) == SUCCESS
)
5536 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5537 return PARSE_OPERAND_FAIL
;
5539 else if (skip_past_char (&p
, ':') == SUCCESS
)
5541 /* FIXME: '@' should be used here, but it's filtered out by generic
5542 code before we get to see it here. This may be subject to
5544 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5546 if (result
!= PARSE_OPERAND_SUCCESS
)
5551 if (inst
.operands
[i
].negative
)
5553 inst
.operands
[i
].negative
= 0;
5557 if (group_relocations
5558 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5560 struct group_reloc_table_entry
*entry
;
5562 /* Skip over the #: or : sequence. */
5568 /* Try to parse a group relocation. Anything else is an
5570 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5572 inst
.error
= _("unknown group relocation");
5573 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5576 /* We now have the group relocation table entry corresponding to
5577 the name in the assembler source. Next, we parse the
5579 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5580 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5582 /* Record the relocation type. */
5586 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5590 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5594 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5601 if (inst
.reloc
.type
== 0)
5603 inst
.error
= _("this group relocation is not allowed on this instruction");
5604 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5610 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5611 return PARSE_OPERAND_FAIL
;
5612 /* If the offset is 0, find out if it's a +0 or -0. */
5613 if (inst
.reloc
.exp
.X_op
== O_constant
5614 && inst
.reloc
.exp
.X_add_number
== 0)
5616 skip_whitespace (q
);
5620 skip_whitespace (q
);
5623 inst
.operands
[i
].negative
= 1;
5628 else if (skip_past_char (&p
, ':') == SUCCESS
)
5630 /* FIXME: '@' should be used here, but it's filtered out by generic code
5631 before we get to see it here. This may be subject to change. */
5632 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5634 if (result
!= PARSE_OPERAND_SUCCESS
)
5638 if (skip_past_char (&p
, ']') == FAIL
)
5640 inst
.error
= _("']' expected");
5641 return PARSE_OPERAND_FAIL
;
5644 if (skip_past_char (&p
, '!') == SUCCESS
)
5645 inst
.operands
[i
].writeback
= 1;
5647 else if (skip_past_comma (&p
) == SUCCESS
)
5649 if (skip_past_char (&p
, '{') == SUCCESS
)
5651 /* [Rn], {expr} - unindexed, with option */
5652 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5653 0, 255, TRUE
) == FAIL
)
5654 return PARSE_OPERAND_FAIL
;
5656 if (skip_past_char (&p
, '}') == FAIL
)
5658 inst
.error
= _("'}' expected at end of 'option' field");
5659 return PARSE_OPERAND_FAIL
;
5661 if (inst
.operands
[i
].preind
)
5663 inst
.error
= _("cannot combine index with option");
5664 return PARSE_OPERAND_FAIL
;
5667 return PARSE_OPERAND_SUCCESS
;
5671 inst
.operands
[i
].postind
= 1;
5672 inst
.operands
[i
].writeback
= 1;
5674 if (inst
.operands
[i
].preind
)
5676 inst
.error
= _("cannot combine pre- and post-indexing");
5677 return PARSE_OPERAND_FAIL
;
5681 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5683 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5685 /* We might be using the immediate for alignment already. If we
5686 are, OR the register number into the low-order bits. */
5687 if (inst
.operands
[i
].immisalign
)
5688 inst
.operands
[i
].imm
|= reg
;
5690 inst
.operands
[i
].imm
= reg
;
5691 inst
.operands
[i
].immisreg
= 1;
5693 if (skip_past_comma (&p
) == SUCCESS
)
5694 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5695 return PARSE_OPERAND_FAIL
;
5700 if (inst
.operands
[i
].negative
)
5702 inst
.operands
[i
].negative
= 0;
5705 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5706 return PARSE_OPERAND_FAIL
;
5707 /* If the offset is 0, find out if it's a +0 or -0. */
5708 if (inst
.reloc
.exp
.X_op
== O_constant
5709 && inst
.reloc
.exp
.X_add_number
== 0)
5711 skip_whitespace (q
);
5715 skip_whitespace (q
);
5718 inst
.operands
[i
].negative
= 1;
5724 /* If at this point neither .preind nor .postind is set, we have a
5725 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5726 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5728 inst
.operands
[i
].preind
= 1;
5729 inst
.reloc
.exp
.X_op
= O_constant
;
5730 inst
.reloc
.exp
.X_add_number
= 0;
5733 return PARSE_OPERAND_SUCCESS
;
5737 parse_address (char **str
, int i
)
5739 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5743 static parse_operand_result
5744 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5746 return parse_address_main (str
, i
, 1, type
);
5749 /* Parse an operand for a MOVW or MOVT instruction. */
5751 parse_half (char **str
)
5756 skip_past_char (&p
, '#');
5757 if (strncasecmp (p
, ":lower16:", 9) == 0)
5758 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5759 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5760 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5762 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5765 skip_whitespace (p
);
5768 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5771 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5773 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5775 inst
.error
= _("constant expression expected");
5778 if (inst
.reloc
.exp
.X_add_number
< 0
5779 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5781 inst
.error
= _("immediate value out of range");
5789 /* Miscellaneous. */
5791 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5792 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5794 parse_psr (char **str
, bfd_boolean lhs
)
5797 unsigned long psr_field
;
5798 const struct asm_psr
*psr
;
5800 bfd_boolean is_apsr
= FALSE
;
5801 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5803 /* PR gas/12698: If the user has specified -march=all then m_profile will
5804 be TRUE, but we want to ignore it in this case as we are building for any
5805 CPU type, including non-m variants. */
5806 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5809 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5810 feature for ease of use and backwards compatibility. */
5812 if (strncasecmp (p
, "SPSR", 4) == 0)
5815 goto unsupported_psr
;
5817 psr_field
= SPSR_BIT
;
5819 else if (strncasecmp (p
, "CPSR", 4) == 0)
5822 goto unsupported_psr
;
5826 else if (strncasecmp (p
, "APSR", 4) == 0)
5828 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5829 and ARMv7-R architecture CPUs. */
5838 while (ISALNUM (*p
) || *p
== '_');
5840 if (strncasecmp (start
, "iapsr", 5) == 0
5841 || strncasecmp (start
, "eapsr", 5) == 0
5842 || strncasecmp (start
, "xpsr", 4) == 0
5843 || strncasecmp (start
, "psr", 3) == 0)
5844 p
= start
+ strcspn (start
, "rR") + 1;
5846 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5852 /* If APSR is being written, a bitfield may be specified. Note that
5853 APSR itself is handled above. */
5854 if (psr
->field
<= 3)
5856 psr_field
= psr
->field
;
5862 /* M-profile MSR instructions have the mask field set to "10", except
5863 *PSR variants which modify APSR, which may use a different mask (and
5864 have been handled already). Do that by setting the PSR_f field
5866 return psr
->field
| (lhs
? PSR_f
: 0);
5869 goto unsupported_psr
;
5875 /* A suffix follows. */
5881 while (ISALNUM (*p
) || *p
== '_');
5885 /* APSR uses a notation for bits, rather than fields. */
5886 unsigned int nzcvq_bits
= 0;
5887 unsigned int g_bit
= 0;
5890 for (bit
= start
; bit
!= p
; bit
++)
5892 switch (TOLOWER (*bit
))
5895 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5899 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5903 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5907 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
5911 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
5915 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
5919 inst
.error
= _("unexpected bit specified after APSR");
5924 if (nzcvq_bits
== 0x1f)
5929 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
5931 inst
.error
= _("selected processor does not "
5932 "support DSP extension");
5939 if ((nzcvq_bits
& 0x20) != 0
5940 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
5941 || (g_bit
& 0x2) != 0)
5943 inst
.error
= _("bad bitmask specified after APSR");
5949 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5954 psr_field
|= psr
->field
;
5960 goto error
; /* Garbage after "[CS]PSR". */
5962 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5963 is deprecated, but allow it anyway. */
5967 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5970 else if (!m_profile
)
5971 /* These bits are never right for M-profile devices: don't set them
5972 (only code paths which read/write APSR reach here). */
5973 psr_field
|= (PSR_c
| PSR_f
);
5979 inst
.error
= _("selected processor does not support requested special "
5980 "purpose register");
5984 inst
.error
= _("flag for {c}psr instruction expected");
5988 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5989 value suitable for splatting into the AIF field of the instruction. */
5992 parse_cps_flags (char **str
)
6001 case '\0': case ',':
6004 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6005 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6006 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6009 inst
.error
= _("unrecognized CPS flag");
6014 if (saw_a_flag
== 0)
6016 inst
.error
= _("missing CPS flags");
6024 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6025 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6028 parse_endian_specifier (char **str
)
6033 if (strncasecmp (s
, "BE", 2))
6035 else if (strncasecmp (s
, "LE", 2))
6039 inst
.error
= _("valid endian specifiers are be or le");
6043 if (ISALNUM (s
[2]) || s
[2] == '_')
6045 inst
.error
= _("valid endian specifiers are be or le");
6050 return little_endian
;
6053 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6054 value suitable for poking into the rotate field of an sxt or sxta
6055 instruction, or FAIL on error. */
6058 parse_ror (char **str
)
6063 if (strncasecmp (s
, "ROR", 3) == 0)
6067 inst
.error
= _("missing rotation field after comma");
6071 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6076 case 0: *str
= s
; return 0x0;
6077 case 8: *str
= s
; return 0x1;
6078 case 16: *str
= s
; return 0x2;
6079 case 24: *str
= s
; return 0x3;
6082 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6087 /* Parse a conditional code (from conds[] below). The value returned is in the
6088 range 0 .. 14, or FAIL. */
6090 parse_cond (char **str
)
6093 const struct asm_cond
*c
;
6095 /* Condition codes are always 2 characters, so matching up to
6096 3 characters is sufficient. */
6101 while (ISALPHA (*q
) && n
< 3)
6103 cond
[n
] = TOLOWER (*q
);
6108 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6111 inst
.error
= _("condition required");
6119 /* Record a use of the given feature. */
6121 record_feature_use (const arm_feature_set
*feature
)
6124 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6126 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6129 /* If the given feature available in the selected CPU, mark it as used.
6130 Returns TRUE iff feature is available. */
6132 mark_feature_used (const arm_feature_set
*feature
)
6134 /* Ensure the option is valid on the current architecture. */
6135 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6138 /* Add the appropriate architecture feature for the barrier option used.
6140 record_feature_use (feature
);
6145 /* Parse an option for a barrier instruction. Returns the encoding for the
6148 parse_barrier (char **str
)
6151 const struct asm_barrier_opt
*o
;
6154 while (ISALPHA (*q
))
6157 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6162 if (!mark_feature_used (&o
->arch
))
6169 /* Parse the operands of a table branch instruction. Similar to a memory
6172 parse_tb (char **str
)
6177 if (skip_past_char (&p
, '[') == FAIL
)
6179 inst
.error
= _("'[' expected");
6183 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6185 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6188 inst
.operands
[0].reg
= reg
;
6190 if (skip_past_comma (&p
) == FAIL
)
6192 inst
.error
= _("',' expected");
6196 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6198 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6201 inst
.operands
[0].imm
= reg
;
6203 if (skip_past_comma (&p
) == SUCCESS
)
6205 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6207 if (inst
.reloc
.exp
.X_add_number
!= 1)
6209 inst
.error
= _("invalid shift");
6212 inst
.operands
[0].shifted
= 1;
6215 if (skip_past_char (&p
, ']') == FAIL
)
6217 inst
.error
= _("']' expected");
6224 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6225 information on the types the operands can take and how they are encoded.
6226 Up to four operands may be read; this function handles setting the
6227 ".present" field for each read operand itself.
6228 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6229 else returns FAIL. */
6232 parse_neon_mov (char **str
, int *which_operand
)
6234 int i
= *which_operand
, val
;
6235 enum arm_reg_type rtype
;
6237 struct neon_type_el optype
;
6239 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6241 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6242 inst
.operands
[i
].reg
= val
;
6243 inst
.operands
[i
].isscalar
= 1;
6244 inst
.operands
[i
].vectype
= optype
;
6245 inst
.operands
[i
++].present
= 1;
6247 if (skip_past_comma (&ptr
) == FAIL
)
6250 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6253 inst
.operands
[i
].reg
= val
;
6254 inst
.operands
[i
].isreg
= 1;
6255 inst
.operands
[i
].present
= 1;
6257 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6260 /* Cases 0, 1, 2, 3, 5 (D only). */
6261 if (skip_past_comma (&ptr
) == FAIL
)
6264 inst
.operands
[i
].reg
= val
;
6265 inst
.operands
[i
].isreg
= 1;
6266 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6267 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6268 inst
.operands
[i
].isvec
= 1;
6269 inst
.operands
[i
].vectype
= optype
;
6270 inst
.operands
[i
++].present
= 1;
6272 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6274 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6275 Case 13: VMOV <Sd>, <Rm> */
6276 inst
.operands
[i
].reg
= val
;
6277 inst
.operands
[i
].isreg
= 1;
6278 inst
.operands
[i
].present
= 1;
6280 if (rtype
== REG_TYPE_NQ
)
6282 first_error (_("can't use Neon quad register here"));
6285 else if (rtype
!= REG_TYPE_VFS
)
6288 if (skip_past_comma (&ptr
) == FAIL
)
6290 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6292 inst
.operands
[i
].reg
= val
;
6293 inst
.operands
[i
].isreg
= 1;
6294 inst
.operands
[i
].present
= 1;
6297 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6300 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6301 Case 1: VMOV<c><q> <Dd>, <Dm>
6302 Case 8: VMOV.F32 <Sd>, <Sm>
6303 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6305 inst
.operands
[i
].reg
= val
;
6306 inst
.operands
[i
].isreg
= 1;
6307 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6308 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6309 inst
.operands
[i
].isvec
= 1;
6310 inst
.operands
[i
].vectype
= optype
;
6311 inst
.operands
[i
].present
= 1;
6313 if (skip_past_comma (&ptr
) == SUCCESS
)
6318 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6321 inst
.operands
[i
].reg
= val
;
6322 inst
.operands
[i
].isreg
= 1;
6323 inst
.operands
[i
++].present
= 1;
6325 if (skip_past_comma (&ptr
) == FAIL
)
6328 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6331 inst
.operands
[i
].reg
= val
;
6332 inst
.operands
[i
].isreg
= 1;
6333 inst
.operands
[i
].present
= 1;
6336 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6337 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6338 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6339 Case 10: VMOV.F32 <Sd>, #<imm>
6340 Case 11: VMOV.F64 <Dd>, #<imm> */
6341 inst
.operands
[i
].immisfloat
= 1;
6342 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6344 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6345 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6349 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6353 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6356 inst
.operands
[i
].reg
= val
;
6357 inst
.operands
[i
].isreg
= 1;
6358 inst
.operands
[i
++].present
= 1;
6360 if (skip_past_comma (&ptr
) == FAIL
)
6363 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6365 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6366 inst
.operands
[i
].reg
= val
;
6367 inst
.operands
[i
].isscalar
= 1;
6368 inst
.operands
[i
].present
= 1;
6369 inst
.operands
[i
].vectype
= optype
;
6371 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6373 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6374 inst
.operands
[i
].reg
= val
;
6375 inst
.operands
[i
].isreg
= 1;
6376 inst
.operands
[i
++].present
= 1;
6378 if (skip_past_comma (&ptr
) == FAIL
)
6381 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6384 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6388 inst
.operands
[i
].reg
= val
;
6389 inst
.operands
[i
].isreg
= 1;
6390 inst
.operands
[i
].isvec
= 1;
6391 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6392 inst
.operands
[i
].vectype
= optype
;
6393 inst
.operands
[i
].present
= 1;
6395 if (rtype
== REG_TYPE_VFS
)
6399 if (skip_past_comma (&ptr
) == FAIL
)
6401 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6404 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6407 inst
.operands
[i
].reg
= val
;
6408 inst
.operands
[i
].isreg
= 1;
6409 inst
.operands
[i
].isvec
= 1;
6410 inst
.operands
[i
].issingle
= 1;
6411 inst
.operands
[i
].vectype
= optype
;
6412 inst
.operands
[i
].present
= 1;
6415 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6419 inst
.operands
[i
].reg
= val
;
6420 inst
.operands
[i
].isreg
= 1;
6421 inst
.operands
[i
].isvec
= 1;
6422 inst
.operands
[i
].issingle
= 1;
6423 inst
.operands
[i
].vectype
= optype
;
6424 inst
.operands
[i
].present
= 1;
6429 first_error (_("parse error"));
6433 /* Successfully parsed the operands. Update args. */
6439 first_error (_("expected comma"));
6443 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
6447 /* Use this macro when the operand constraints are different
6448 for ARM and THUMB (e.g. ldrd). */
6449 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6450 ((arm_operand) | ((thumb_operand) << 16))
6452 /* Matcher codes for parse_operands. */
6453 enum operand_parse_code
6455 OP_stop
, /* end of line */
6457 OP_RR
, /* ARM register */
6458 OP_RRnpc
, /* ARM register, not r15 */
6459 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6460 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6461 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6462 optional trailing ! */
6463 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6464 OP_RCP
, /* Coprocessor number */
6465 OP_RCN
, /* Coprocessor register */
6466 OP_RF
, /* FPA register */
6467 OP_RVS
, /* VFP single precision register */
6468 OP_RVD
, /* VFP double precision register (0..15) */
6469 OP_RND
, /* Neon double precision register (0..31) */
6470 OP_RNQ
, /* Neon quad precision register */
6471 OP_RVSD
, /* VFP single or double precision register */
6472 OP_RNDQ
, /* Neon double or quad precision register */
6473 OP_RNSDQ
, /* Neon single, double or quad precision register */
6474 OP_RNSC
, /* Neon scalar D[X] */
6475 OP_RVC
, /* VFP control register */
6476 OP_RMF
, /* Maverick F register */
6477 OP_RMD
, /* Maverick D register */
6478 OP_RMFX
, /* Maverick FX register */
6479 OP_RMDX
, /* Maverick DX register */
6480 OP_RMAX
, /* Maverick AX register */
6481 OP_RMDS
, /* Maverick DSPSC register */
6482 OP_RIWR
, /* iWMMXt wR register */
6483 OP_RIWC
, /* iWMMXt wC register */
6484 OP_RIWG
, /* iWMMXt wCG register */
6485 OP_RXA
, /* XScale accumulator register */
6487 OP_REGLST
, /* ARM register list */
6488 OP_VRSLST
, /* VFP single-precision register list */
6489 OP_VRDLST
, /* VFP double-precision register list */
6490 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6491 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6492 OP_NSTRLST
, /* Neon element/structure list */
6494 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6495 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6496 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6497 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6498 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6499 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6500 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6501 OP_VMOV
, /* Neon VMOV operands. */
6502 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6503 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6504 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6506 OP_I0
, /* immediate zero */
6507 OP_I7
, /* immediate value 0 .. 7 */
6508 OP_I15
, /* 0 .. 15 */
6509 OP_I16
, /* 1 .. 16 */
6510 OP_I16z
, /* 0 .. 16 */
6511 OP_I31
, /* 0 .. 31 */
6512 OP_I31w
, /* 0 .. 31, optional trailing ! */
6513 OP_I32
, /* 1 .. 32 */
6514 OP_I32z
, /* 0 .. 32 */
6515 OP_I63
, /* 0 .. 63 */
6516 OP_I63s
, /* -64 .. 63 */
6517 OP_I64
, /* 1 .. 64 */
6518 OP_I64z
, /* 0 .. 64 */
6519 OP_I255
, /* 0 .. 255 */
6521 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6522 OP_I7b
, /* 0 .. 7 */
6523 OP_I15b
, /* 0 .. 15 */
6524 OP_I31b
, /* 0 .. 31 */
6526 OP_SH
, /* shifter operand */
6527 OP_SHG
, /* shifter operand with possible group relocation */
6528 OP_ADDR
, /* Memory address expression (any mode) */
6529 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6530 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6531 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6532 OP_EXP
, /* arbitrary expression */
6533 OP_EXPi
, /* same, with optional immediate prefix */
6534 OP_EXPr
, /* same, with optional relocation suffix */
6535 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6537 OP_CPSF
, /* CPS flags */
6538 OP_ENDI
, /* Endianness specifier */
6539 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6540 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6541 OP_COND
, /* conditional code */
6542 OP_TB
, /* Table branch. */
6544 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6546 OP_RRnpc_I0
, /* ARM register or literal 0 */
6547 OP_RR_EXr
, /* ARM register or expression with opt. reloc suff. */
6548 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6549 OP_RF_IF
, /* FPA register or immediate */
6550 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6551 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6553 /* Optional operands. */
6554 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6555 OP_oI31b
, /* 0 .. 31 */
6556 OP_oI32b
, /* 1 .. 32 */
6557 OP_oI32z
, /* 0 .. 32 */
6558 OP_oIffffb
, /* 0 .. 65535 */
6559 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6561 OP_oRR
, /* ARM register */
6562 OP_oRRnpc
, /* ARM register, not the PC */
6563 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6564 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6565 OP_oRND
, /* Optional Neon double precision register */
6566 OP_oRNQ
, /* Optional Neon quad precision register */
6567 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6568 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6569 OP_oSHll
, /* LSL immediate */
6570 OP_oSHar
, /* ASR immediate */
6571 OP_oSHllar
, /* LSL or ASR immediate */
6572 OP_oROR
, /* ROR 0/8/16/24 */
6573 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6575 /* Some pre-defined mixed (ARM/THUMB) operands. */
6576 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6577 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6578 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6580 OP_FIRST_OPTIONAL
= OP_oI7b
6583 /* Generic instruction operand parser. This does no encoding and no
6584 semantic validation; it merely squirrels values away in the inst
6585 structure. Returns SUCCESS or FAIL depending on whether the
6586 specified grammar matched. */
6588 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6590 unsigned const int *upat
= pattern
;
6591 char *backtrack_pos
= 0;
6592 const char *backtrack_error
= 0;
6593 int i
, val
= 0, backtrack_index
= 0;
6594 enum arm_reg_type rtype
;
6595 parse_operand_result result
;
6596 unsigned int op_parse_code
;
6598 #define po_char_or_fail(chr) \
6601 if (skip_past_char (&str, chr) == FAIL) \
6606 #define po_reg_or_fail(regtype) \
6609 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6610 & inst.operands[i].vectype); \
6613 first_error (_(reg_expected_msgs[regtype])); \
6616 inst.operands[i].reg = val; \
6617 inst.operands[i].isreg = 1; \
6618 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6619 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6620 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6621 || rtype == REG_TYPE_VFD \
6622 || rtype == REG_TYPE_NQ); \
6626 #define po_reg_or_goto(regtype, label) \
6629 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6630 & inst.operands[i].vectype); \
6634 inst.operands[i].reg = val; \
6635 inst.operands[i].isreg = 1; \
6636 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6637 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6638 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6639 || rtype == REG_TYPE_VFD \
6640 || rtype == REG_TYPE_NQ); \
6644 #define po_imm_or_fail(min, max, popt) \
6647 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6649 inst.operands[i].imm = val; \
6653 #define po_scalar_or_goto(elsz, label) \
6656 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6659 inst.operands[i].reg = val; \
6660 inst.operands[i].isscalar = 1; \
6664 #define po_misc_or_fail(expr) \
6672 #define po_misc_or_fail_no_backtrack(expr) \
6676 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6677 backtrack_pos = 0; \
6678 if (result != PARSE_OPERAND_SUCCESS) \
6683 #define po_barrier_or_imm(str) \
6686 val = parse_barrier (&str); \
6687 if (val == FAIL && ! ISALPHA (*str)) \
6690 /* ISB can only take SY as an option. */ \
6691 || ((inst.instruction & 0xf0) == 0x60 \
6694 inst.error = _("invalid barrier type"); \
6695 backtrack_pos = 0; \
6701 skip_whitespace (str
);
6703 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6705 op_parse_code
= upat
[i
];
6706 if (op_parse_code
>= 1<<16)
6707 op_parse_code
= thumb
? (op_parse_code
>> 16)
6708 : (op_parse_code
& ((1<<16)-1));
6710 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6712 /* Remember where we are in case we need to backtrack. */
6713 gas_assert (!backtrack_pos
);
6714 backtrack_pos
= str
;
6715 backtrack_error
= inst
.error
;
6716 backtrack_index
= i
;
6719 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6720 po_char_or_fail (',');
6722 switch (op_parse_code
)
6730 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6731 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6732 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6733 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6734 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6735 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6737 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6739 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6741 /* Also accept generic coprocessor regs for unknown registers. */
6743 po_reg_or_fail (REG_TYPE_CN
);
6745 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6746 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6747 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6748 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6749 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6750 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6751 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6752 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6753 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6754 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6756 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6758 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6759 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6761 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6763 /* Neon scalar. Using an element size of 8 means that some invalid
6764 scalars are accepted here, so deal with those in later code. */
6765 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6769 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6772 po_imm_or_fail (0, 0, TRUE
);
6777 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6782 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6785 if (parse_ifimm_zero (&str
))
6786 inst
.operands
[i
].imm
= 0;
6790 = _("only floating point zero is allowed as immediate value");
6798 po_scalar_or_goto (8, try_rr
);
6801 po_reg_or_fail (REG_TYPE_RN
);
6807 po_scalar_or_goto (8, try_nsdq
);
6810 po_reg_or_fail (REG_TYPE_NSDQ
);
6816 po_scalar_or_goto (8, try_ndq
);
6819 po_reg_or_fail (REG_TYPE_NDQ
);
6825 po_scalar_or_goto (8, try_vfd
);
6828 po_reg_or_fail (REG_TYPE_VFD
);
6833 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6834 not careful then bad things might happen. */
6835 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6840 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6843 /* There's a possibility of getting a 64-bit immediate here, so
6844 we need special handling. */
6845 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6848 inst
.error
= _("immediate value is out of range");
6856 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6859 po_imm_or_fail (0, 63, TRUE
);
6864 po_char_or_fail ('[');
6865 po_reg_or_fail (REG_TYPE_RN
);
6866 po_char_or_fail (']');
6872 po_reg_or_fail (REG_TYPE_RN
);
6873 if (skip_past_char (&str
, '!') == SUCCESS
)
6874 inst
.operands
[i
].writeback
= 1;
6878 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6879 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6880 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6881 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6882 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6883 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6884 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6885 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6886 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6887 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6888 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6889 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6891 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6893 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6894 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6896 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6897 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6898 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
6899 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6901 /* Immediate variants */
6903 po_char_or_fail ('{');
6904 po_imm_or_fail (0, 255, TRUE
);
6905 po_char_or_fail ('}');
6909 /* The expression parser chokes on a trailing !, so we have
6910 to find it first and zap it. */
6913 while (*s
&& *s
!= ',')
6918 inst
.operands
[i
].writeback
= 1;
6920 po_imm_or_fail (0, 31, TRUE
);
6928 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6933 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6938 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6940 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6942 val
= parse_reloc (&str
);
6945 inst
.error
= _("unrecognized relocation suffix");
6948 else if (val
!= BFD_RELOC_UNUSED
)
6950 inst
.operands
[i
].imm
= val
;
6951 inst
.operands
[i
].hasreloc
= 1;
6956 /* Operand for MOVW or MOVT. */
6958 po_misc_or_fail (parse_half (&str
));
6961 /* Register or expression. */
6962 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6963 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6965 /* Register or immediate. */
6966 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6967 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6969 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6971 if (!is_immediate_prefix (*str
))
6974 val
= parse_fpa_immediate (&str
);
6977 /* FPA immediates are encoded as registers 8-15.
6978 parse_fpa_immediate has already applied the offset. */
6979 inst
.operands
[i
].reg
= val
;
6980 inst
.operands
[i
].isreg
= 1;
6983 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6984 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6986 /* Two kinds of register. */
6989 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6991 || (rege
->type
!= REG_TYPE_MMXWR
6992 && rege
->type
!= REG_TYPE_MMXWC
6993 && rege
->type
!= REG_TYPE_MMXWCG
))
6995 inst
.error
= _("iWMMXt data or control register expected");
6998 inst
.operands
[i
].reg
= rege
->number
;
6999 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7005 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7007 || (rege
->type
!= REG_TYPE_MMXWC
7008 && rege
->type
!= REG_TYPE_MMXWCG
))
7010 inst
.error
= _("iWMMXt control register expected");
7013 inst
.operands
[i
].reg
= rege
->number
;
7014 inst
.operands
[i
].isreg
= 1;
7019 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7020 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7021 case OP_oROR
: val
= parse_ror (&str
); break;
7022 case OP_COND
: val
= parse_cond (&str
); break;
7023 case OP_oBARRIER_I15
:
7024 po_barrier_or_imm (str
); break;
7026 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7032 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7033 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7035 inst
.error
= _("Banked registers are not available with this "
7041 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7045 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7048 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7050 if (strncasecmp (str
, "APSR_", 5) == 0)
7057 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7058 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7059 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7060 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7061 default: found
= 16;
7065 inst
.operands
[i
].isvec
= 1;
7066 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7067 inst
.operands
[i
].reg
= REG_PC
;
7074 po_misc_or_fail (parse_tb (&str
));
7077 /* Register lists. */
7079 val
= parse_reg_list (&str
);
7082 inst
.operands
[i
].writeback
= 1;
7088 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7092 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7096 /* Allow Q registers too. */
7097 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7102 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7104 inst
.operands
[i
].issingle
= 1;
7109 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7114 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7115 &inst
.operands
[i
].vectype
);
7118 /* Addressing modes */
7120 po_misc_or_fail (parse_address (&str
, i
));
7124 po_misc_or_fail_no_backtrack (
7125 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7129 po_misc_or_fail_no_backtrack (
7130 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7134 po_misc_or_fail_no_backtrack (
7135 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7139 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7143 po_misc_or_fail_no_backtrack (
7144 parse_shifter_operand_group_reloc (&str
, i
));
7148 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7152 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7156 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7160 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7163 /* Various value-based sanity checks and shared operations. We
7164 do not signal immediate failures for the register constraints;
7165 this allows a syntax error to take precedence. */
7166 switch (op_parse_code
)
7174 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7175 inst
.error
= BAD_PC
;
7180 if (inst
.operands
[i
].isreg
)
7182 if (inst
.operands
[i
].reg
== REG_PC
)
7183 inst
.error
= BAD_PC
;
7184 else if (inst
.operands
[i
].reg
== REG_SP
)
7185 inst
.error
= BAD_SP
;
7190 if (inst
.operands
[i
].isreg
7191 && inst
.operands
[i
].reg
== REG_PC
7192 && (inst
.operands
[i
].writeback
|| thumb
))
7193 inst
.error
= BAD_PC
;
7202 case OP_oBARRIER_I15
:
7211 inst
.operands
[i
].imm
= val
;
7218 /* If we get here, this operand was successfully parsed. */
7219 inst
.operands
[i
].present
= 1;
7223 inst
.error
= BAD_ARGS
;
7228 /* The parse routine should already have set inst.error, but set a
7229 default here just in case. */
7231 inst
.error
= _("syntax error");
7235 /* Do not backtrack over a trailing optional argument that
7236 absorbed some text. We will only fail again, with the
7237 'garbage following instruction' error message, which is
7238 probably less helpful than the current one. */
7239 if (backtrack_index
== i
&& backtrack_pos
!= str
7240 && upat
[i
+1] == OP_stop
)
7243 inst
.error
= _("syntax error");
7247 /* Try again, skipping the optional argument at backtrack_pos. */
7248 str
= backtrack_pos
;
7249 inst
.error
= backtrack_error
;
7250 inst
.operands
[backtrack_index
].present
= 0;
7251 i
= backtrack_index
;
7255 /* Check that we have parsed all the arguments. */
7256 if (*str
!= '\0' && !inst
.error
)
7257 inst
.error
= _("garbage following instruction");
7259 return inst
.error
? FAIL
: SUCCESS
;
7262 #undef po_char_or_fail
7263 #undef po_reg_or_fail
7264 #undef po_reg_or_goto
7265 #undef po_imm_or_fail
7266 #undef po_scalar_or_fail
7267 #undef po_barrier_or_imm
7269 /* Shorthand macro for instruction encoding functions issuing errors. */
7270 #define constraint(expr, err) \
7281 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7282 instructions are unpredictable if these registers are used. This
7283 is the BadReg predicate in ARM's Thumb-2 documentation. */
7284 #define reject_bad_reg(reg) \
7286 if (reg == REG_SP || reg == REG_PC) \
7288 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
7293 /* If REG is R13 (the stack pointer), warn that its use is
7295 #define warn_deprecated_sp(reg) \
7297 if (warn_on_deprecated && reg == REG_SP) \
7298 as_tsktsk (_("use of r13 is deprecated")); \
7301 /* Functions for operand encoding. ARM, then Thumb. */
7303 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7305 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7307 The only binary encoding difference is the Coprocessor number. Coprocessor
7308 9 is used for half-precision calculations or conversions. The format of the
7309 instruction is the same as the equivalent Coprocessor 10 instuction that
7310 exists for Single-Precision operation. */
7313 do_scalar_fp16_v82_encode (void)
7315 if (inst
.cond
!= COND_ALWAYS
)
7316 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7317 " the behaviour is UNPREDICTABLE"));
7318 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7321 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7322 mark_feature_used (&arm_ext_fp16
);
7325 /* If VAL can be encoded in the immediate field of an ARM instruction,
7326 return the encoded form. Otherwise, return FAIL. */
7329 encode_arm_immediate (unsigned int val
)
7336 for (i
= 2; i
< 32; i
+= 2)
7337 if ((a
= rotate_left (val
, i
)) <= 0xff)
7338 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7343 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7344 return the encoded form. Otherwise, return FAIL. */
7346 encode_thumb32_immediate (unsigned int val
)
7353 for (i
= 1; i
<= 24; i
++)
7356 if ((val
& ~(0xff << i
)) == 0)
7357 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7361 if (val
== ((a
<< 16) | a
))
7363 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7367 if (val
== ((a
<< 16) | a
))
7368 return 0x200 | (a
>> 8);
7372 /* Encode a VFP SP or DP register number into inst.instruction. */
7375 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7377 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7380 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7383 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7386 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7391 first_error (_("D register out of range for selected VFP version"));
7399 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7403 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7407 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7411 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7415 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7419 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7427 /* Encode a <shift> in an ARM-format instruction. The immediate,
7428 if any, is handled by md_apply_fix. */
7430 encode_arm_shift (int i
)
7432 /* register-shifted register. */
7433 if (inst
.operands
[i
].immisreg
)
7436 for (index
= 0; index
<= i
; ++index
)
7438 gas_assert (inst
.operands
[index
].present
);
7439 if (inst
.operands
[index
].isreg
&& inst
.operands
[index
].reg
== REG_PC
)
7440 as_warn (UNPRED_REG ("r15"));
7443 if (inst
.operands
[i
].imm
== REG_PC
)
7444 as_warn (UNPRED_REG ("r15"));
7447 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7448 inst
.instruction
|= SHIFT_ROR
<< 5;
7451 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7452 if (inst
.operands
[i
].immisreg
)
7454 inst
.instruction
|= SHIFT_BY_REG
;
7455 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7458 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7463 encode_arm_shifter_operand (int i
)
7465 if (inst
.operands
[i
].isreg
)
7467 inst
.instruction
|= inst
.operands
[i
].reg
;
7468 encode_arm_shift (i
);
7472 inst
.instruction
|= INST_IMMEDIATE
;
7473 if (inst
.reloc
.type
!= BFD_RELOC_ARM_IMMEDIATE
)
7474 inst
.instruction
|= inst
.operands
[i
].imm
;
7478 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7480 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7483 Generate an error if the operand is not a register. */
7484 constraint (!inst
.operands
[i
].isreg
,
7485 _("Instruction does not support =N addresses"));
7487 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7489 if (inst
.operands
[i
].preind
)
7493 inst
.error
= _("instruction does not accept preindexed addressing");
7496 inst
.instruction
|= PRE_INDEX
;
7497 if (inst
.operands
[i
].writeback
)
7498 inst
.instruction
|= WRITE_BACK
;
7501 else if (inst
.operands
[i
].postind
)
7503 gas_assert (inst
.operands
[i
].writeback
);
7505 inst
.instruction
|= WRITE_BACK
;
7507 else /* unindexed - only for coprocessor */
7509 inst
.error
= _("instruction does not accept unindexed addressing");
7513 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7514 && (((inst
.instruction
& 0x000f0000) >> 16)
7515 == ((inst
.instruction
& 0x0000f000) >> 12)))
7516 as_warn ((inst
.instruction
& LOAD_BIT
)
7517 ? _("destination register same as write-back base")
7518 : _("source register same as write-back base"));
7521 /* inst.operands[i] was set up by parse_address. Encode it into an
7522 ARM-format mode 2 load or store instruction. If is_t is true,
7523 reject forms that cannot be used with a T instruction (i.e. not
7526 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7528 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7530 encode_arm_addr_mode_common (i
, is_t
);
7532 if (inst
.operands
[i
].immisreg
)
7534 constraint ((inst
.operands
[i
].imm
== REG_PC
7535 || (is_pc
&& inst
.operands
[i
].writeback
)),
7537 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7538 inst
.instruction
|= inst
.operands
[i
].imm
;
7539 if (!inst
.operands
[i
].negative
)
7540 inst
.instruction
|= INDEX_UP
;
7541 if (inst
.operands
[i
].shifted
)
7543 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7544 inst
.instruction
|= SHIFT_ROR
<< 5;
7547 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7548 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7552 else /* immediate offset in inst.reloc */
7554 if (is_pc
&& !inst
.reloc
.pc_rel
)
7556 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7558 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7559 cannot use PC in addressing.
7560 PC cannot be used in writeback addressing, either. */
7561 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7564 /* Use of PC in str is deprecated for ARMv7. */
7565 if (warn_on_deprecated
7567 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7568 as_tsktsk (_("use of PC in this instruction is deprecated"));
7571 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7573 /* Prefer + for zero encoded value. */
7574 if (!inst
.operands
[i
].negative
)
7575 inst
.instruction
|= INDEX_UP
;
7576 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
7581 /* inst.operands[i] was set up by parse_address. Encode it into an
7582 ARM-format mode 3 load or store instruction. Reject forms that
7583 cannot be used with such instructions. If is_t is true, reject
7584 forms that cannot be used with a T instruction (i.e. not
7587 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7589 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7591 inst
.error
= _("instruction does not accept scaled register index");
7595 encode_arm_addr_mode_common (i
, is_t
);
7597 if (inst
.operands
[i
].immisreg
)
7599 constraint ((inst
.operands
[i
].imm
== REG_PC
7600 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7602 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7604 inst
.instruction
|= inst
.operands
[i
].imm
;
7605 if (!inst
.operands
[i
].negative
)
7606 inst
.instruction
|= INDEX_UP
;
7608 else /* immediate offset in inst.reloc */
7610 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
7611 && inst
.operands
[i
].writeback
),
7613 inst
.instruction
|= HWOFFSET_IMM
;
7614 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7616 /* Prefer + for zero encoded value. */
7617 if (!inst
.operands
[i
].negative
)
7618 inst
.instruction
|= INDEX_UP
;
7620 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7625 /* Write immediate bits [7:0] to the following locations:
7627 |28/24|23 19|18 16|15 4|3 0|
7628 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7630 This function is used by VMOV/VMVN/VORR/VBIC. */
7633 neon_write_immbits (unsigned immbits
)
7635 inst
.instruction
|= immbits
& 0xf;
7636 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7637 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      break;
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  /* Each byte of IMM must be either all-zeros or all-ones.  */
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}
/* For immediate of above form, return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  /* Gather bit 0 of each byte into a 4-bit nibble.  */
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}
/* Compress quarter-float representation to 0b...000 abcdefgh.
   The input is a 32-bit single-precision bit pattern; the result packs
   its sign bit and top seven exponent/mantissa bits into 8 bits.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
7706 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7707 the instruction. *OP is passed as the initial value of the op field, and
7708 may be set to a different value depending on the constant (i.e.
7709 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7710 MVN). If the immediate looks like a repeated pattern then also
7711 try smaller element sizes. */
7714 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7715 unsigned *immbits
, int *op
, int size
,
7716 enum neon_el_type type
)
7718 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7720 if (type
== NT_float
&& !float_p
)
7723 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7725 if (size
!= 32 || *op
== 1)
7727 *immbits
= neon_qfloat_bits (immlo
);
7733 if (neon_bits_same_in_bytes (immhi
)
7734 && neon_bits_same_in_bytes (immlo
))
7738 *immbits
= (neon_squash_bits (immhi
) << 4)
7739 | neon_squash_bits (immlo
);
7750 if (immlo
== (immlo
& 0x000000ff))
7755 else if (immlo
== (immlo
& 0x0000ff00))
7757 *immbits
= immlo
>> 8;
7760 else if (immlo
== (immlo
& 0x00ff0000))
7762 *immbits
= immlo
>> 16;
7765 else if (immlo
== (immlo
& 0xff000000))
7767 *immbits
= immlo
>> 24;
7770 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7772 *immbits
= (immlo
>> 8) & 0xff;
7775 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7777 *immbits
= (immlo
>> 16) & 0xff;
7781 if ((immlo
& 0xffff) != (immlo
>> 16))
7788 if (immlo
== (immlo
& 0x000000ff))
7793 else if (immlo
== (immlo
& 0x0000ff00))
7795 *immbits
= immlo
>> 8;
7799 if ((immlo
& 0xff) != (immlo
>> 8))
7804 if (immlo
== (immlo
& 0x000000ff))
7806 /* Don't allow MVN with 8-bit immediate. */
7816 #if defined BFD_HOST_64_BIT
7817 /* Returns TRUE if double precision value V may be cast
7818 to single precision without loss of accuracy. */
7821 is_double_a_single (bfd_int64_t v
)
7823 int exp
= (int)((v
>> 52) & 0x7FF);
7824 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7826 return (exp
== 0 || exp
== 0x7FF
7827 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
7828 && (mantissa
& 0x1FFFFFFFl
) == 0;
7831 /* Returns a double precision value casted to single precision
7832 (ignoring the least significant bits in exponent and mantissa). */
7835 double_to_single (bfd_int64_t v
)
7837 int sign
= (int) ((v
>> 63) & 1l);
7838 int exp
= (int) ((v
>> 52) & 0x7FF);
7839 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7845 exp
= exp
- 1023 + 127;
7854 /* No denormalized numbers. */
7860 return (sign
<< 31) | (exp
<< 23) | mantissa
;
7862 #endif /* BFD_HOST_64_BIT */
7871 static void do_vfp_nsyn_opcode (const char *);
7873 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7874 Determine whether it can be performed with a move instruction; if
7875 it can, convert inst.instruction to that move instruction and
7876 return TRUE; if it can't, convert inst.instruction to a literal-pool
7877 load and return FALSE. If this is not a valid thing to do in the
7878 current context, set inst.error and return TRUE.
7880 inst.operands[i] describes the destination register. */
7883 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
7886 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
7887 bfd_boolean arm_p
= (t
== CONST_ARM
);
7890 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
7894 if ((inst
.instruction
& tbit
) == 0)
7896 inst
.error
= _("invalid pseudo operation");
7900 if (inst
.reloc
.exp
.X_op
!= O_constant
7901 && inst
.reloc
.exp
.X_op
!= O_symbol
7902 && inst
.reloc
.exp
.X_op
!= O_big
)
7904 inst
.error
= _("constant expression expected");
7908 if (inst
.reloc
.exp
.X_op
== O_constant
7909 || inst
.reloc
.exp
.X_op
== O_big
)
7911 #if defined BFD_HOST_64_BIT
7916 if (inst
.reloc
.exp
.X_op
== O_big
)
7918 LITTLENUM_TYPE w
[X_PRECISION
];
7921 if (inst
.reloc
.exp
.X_add_number
== -1)
7923 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
7925 /* FIXME: Should we check words w[2..5] ? */
7930 #if defined BFD_HOST_64_BIT
7932 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
7933 << LITTLENUM_NUMBER_OF_BITS
)
7934 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
7935 << LITTLENUM_NUMBER_OF_BITS
)
7936 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
7937 << LITTLENUM_NUMBER_OF_BITS
)
7938 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
7940 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
7941 | (l
[0] & LITTLENUM_MASK
);
7945 v
= inst
.reloc
.exp
.X_add_number
;
7947 if (!inst
.operands
[i
].issingle
)
7951 /* This can be encoded only for a low register. */
7952 if ((v
& ~0xFF) == 0 && (inst
.operands
[i
].reg
< 8))
7954 /* This can be done with a mov(1) instruction. */
7955 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
7956 inst
.instruction
|= v
;
7960 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
7961 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7963 /* Check if on thumb2 it can be done with a mov.w, mvn or
7964 movw instruction. */
7965 unsigned int newimm
;
7966 bfd_boolean isNegated
;
7968 newimm
= encode_thumb32_immediate (v
);
7969 if (newimm
!= (unsigned int) FAIL
)
7973 newimm
= encode_thumb32_immediate (~v
);
7974 if (newimm
!= (unsigned int) FAIL
)
7978 /* The number can be loaded with a mov.w or mvn
7980 if (newimm
!= (unsigned int) FAIL
7981 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
7983 inst
.instruction
= (0xf04f0000 /* MOV.W. */
7984 | (inst
.operands
[i
].reg
<< 8));
7985 /* Change to MOVN. */
7986 inst
.instruction
|= (isNegated
? 0x200000 : 0);
7987 inst
.instruction
|= (newimm
& 0x800) << 15;
7988 inst
.instruction
|= (newimm
& 0x700) << 4;
7989 inst
.instruction
|= (newimm
& 0x0ff);
7992 /* The number can be loaded with a movw instruction. */
7993 else if ((v
& ~0xFFFF) == 0
7994 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7996 int imm
= v
& 0xFFFF;
7998 inst
.instruction
= 0xf2400000; /* MOVW. */
7999 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8000 inst
.instruction
|= (imm
& 0xf000) << 4;
8001 inst
.instruction
|= (imm
& 0x0800) << 15;
8002 inst
.instruction
|= (imm
& 0x0700) << 4;
8003 inst
.instruction
|= (imm
& 0x00ff);
8010 int value
= encode_arm_immediate (v
);
8014 /* This can be done with a mov instruction. */
8015 inst
.instruction
&= LITERAL_MASK
;
8016 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8017 inst
.instruction
|= value
& 0xfff;
8021 value
= encode_arm_immediate (~ v
);
8024 /* This can be done with a mvn instruction. */
8025 inst
.instruction
&= LITERAL_MASK
;
8026 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8027 inst
.instruction
|= value
& 0xfff;
8031 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8034 unsigned immbits
= 0;
8035 unsigned immlo
= inst
.operands
[1].imm
;
8036 unsigned immhi
= inst
.operands
[1].regisimm
8037 ? inst
.operands
[1].reg
8038 : inst
.reloc
.exp
.X_unsigned
8040 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8041 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8042 &op
, 64, NT_invtype
);
8046 neon_invert_size (&immlo
, &immhi
, 64);
8048 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8049 &op
, 64, NT_invtype
);
8054 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8060 /* Fill other bits in vmov encoding for both thumb and arm. */
8062 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8064 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8065 neon_write_immbits (immbits
);
8073 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8074 if (inst
.operands
[i
].issingle
8075 && is_quarter_float (inst
.operands
[1].imm
)
8076 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8078 inst
.operands
[1].imm
=
8079 neon_qfloat_bits (v
);
8080 do_vfp_nsyn_opcode ("fconsts");
8084 /* If our host does not support a 64-bit type then we cannot perform
8085 the following optimization. This mean that there will be a
8086 discrepancy between the output produced by an assembler built for
8087 a 32-bit-only host and the output produced from a 64-bit host, but
8088 this cannot be helped. */
8089 #if defined BFD_HOST_64_BIT
8090 else if (!inst
.operands
[1].issingle
8091 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8093 if (is_double_a_single (v
)
8094 && is_quarter_float (double_to_single (v
)))
8096 inst
.operands
[1].imm
=
8097 neon_qfloat_bits (double_to_single (v
));
8098 do_vfp_nsyn_opcode ("fconstd");
8106 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8107 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8110 inst
.operands
[1].reg
= REG_PC
;
8111 inst
.operands
[1].isreg
= 1;
8112 inst
.operands
[1].preind
= 1;
8113 inst
.reloc
.pc_rel
= 1;
8114 inst
.reloc
.type
= (thumb_p
8115 ? BFD_RELOC_ARM_THUMB_OFFSET
8117 ? BFD_RELOC_ARM_HWLITERAL
8118 : BFD_RELOC_ARM_LITERAL
));
8122 /* inst.operands[i] was set up by parse_address. Encode it into an
8123 ARM-format instruction. Reject all forms which cannot be encoded
8124 into a coprocessor load/store instruction. If wb_ok is false,
8125 reject use of writeback; if unind_ok is false, reject use of
8126 unindexed addressing. If reloc_override is not 0, use it instead
8127 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8128 (in which case it is preserved). */
8131 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8133 if (!inst
.operands
[i
].isreg
)
8136 if (! inst
.operands
[0].isvec
)
8138 inst
.error
= _("invalid co-processor operand");
8141 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8145 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8147 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8149 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8151 gas_assert (!inst
.operands
[i
].writeback
);
8154 inst
.error
= _("instruction does not support unindexed addressing");
8157 inst
.instruction
|= inst
.operands
[i
].imm
;
8158 inst
.instruction
|= INDEX_UP
;
8162 if (inst
.operands
[i
].preind
)
8163 inst
.instruction
|= PRE_INDEX
;
8165 if (inst
.operands
[i
].writeback
)
8167 if (inst
.operands
[i
].reg
== REG_PC
)
8169 inst
.error
= _("pc may not be used with write-back");
8174 inst
.error
= _("instruction does not support writeback");
8177 inst
.instruction
|= WRITE_BACK
;
8181 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
8182 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8183 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
8184 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8187 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8189 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8192 /* Prefer + for zero encoded value. */
8193 if (!inst
.operands
[i
].negative
)
8194 inst
.instruction
|= INDEX_UP
;
8199 /* Functions for instruction encoding, sorted by sub-architecture.
8200 First some generics; their names are taken from the conventional
8201 bit positions for register arguments in ARM format instructions. */
8211 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8217 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8223 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8224 inst
.instruction
|= inst
.operands
[1].reg
;
8230 inst
.instruction
|= inst
.operands
[0].reg
;
8231 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8237 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8238 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8244 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8245 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8251 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8252 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8256 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8258 if (ARM_CPU_IS_ANY (cpu_variant
))
8260 as_tsktsk ("%s", msg
);
8263 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8275 unsigned Rn
= inst
.operands
[2].reg
;
8276 /* Enforce restrictions on SWP instruction. */
8277 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8279 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8280 _("Rn must not overlap other operands"));
8282 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8284 if (!check_obsolete (&arm_ext_v8
,
8285 _("swp{b} use is obsoleted for ARMv8 and later"))
8286 && warn_on_deprecated
8287 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8288 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8291 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8292 inst
.instruction
|= inst
.operands
[1].reg
;
8293 inst
.instruction
|= Rn
<< 16;
8299 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8300 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8301 inst
.instruction
|= inst
.operands
[2].reg
;
8307 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8308 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
8309 && inst
.reloc
.exp
.X_op
!= O_illegal
)
8310 || inst
.reloc
.exp
.X_add_number
!= 0),
8312 inst
.instruction
|= inst
.operands
[0].reg
;
8313 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8314 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8320 inst
.instruction
|= inst
.operands
[0].imm
;
8326 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8327 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8330 /* ARM instructions, in alphabetical order by function name (except
8331 that wrapper functions appear immediately after the function they
8334 /* This is a pseudo-op of the form "adr rd, label" to be converted
8335 into a relative address of the form "add rd, pc, #label-.-8". */
8340 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8342 /* Frag hacking will turn this into a sub instruction if the offset turns
8343 out to be negative. */
8344 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8345 inst
.reloc
.pc_rel
= 1;
8346 inst
.reloc
.exp
.X_add_number
-= 8;
8349 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8350 into a relative address of the form:
8351 add rd, pc, #low(label-.-8)"
8352 add rd, rd, #high(label-.-8)" */
8357 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8359 /* Frag hacking will turn this into a sub instruction if the offset turns
8360 out to be negative. */
8361 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8362 inst
.reloc
.pc_rel
= 1;
8363 inst
.size
= INSN_SIZE
* 2;
8364 inst
.reloc
.exp
.X_add_number
-= 8;
8370 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8371 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8373 if (!inst
.operands
[1].present
)
8374 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8375 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8376 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8377 encode_arm_shifter_operand (2);
8383 if (inst
.operands
[0].present
)
8384 inst
.instruction
|= inst
.operands
[0].imm
;
8386 inst
.instruction
|= 0xf;
8392 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8393 constraint (msb
> 32, _("bit-field extends past end of register"));
8394 /* The instruction encoding stores the LSB and MSB,
8395 not the LSB and width. */
8396 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8397 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8398 inst
.instruction
|= (msb
- 1) << 16;
8406 /* #0 in second position is alternative syntax for bfc, which is
8407 the same instruction but with REG_PC in the Rm field. */
8408 if (!inst
.operands
[1].isreg
)
8409 inst
.operands
[1].reg
= REG_PC
;
8411 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8412 constraint (msb
> 32, _("bit-field extends past end of register"));
8413 /* The instruction encoding stores the LSB and MSB,
8414 not the LSB and width. */
8415 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8416 inst
.instruction
|= inst
.operands
[1].reg
;
8417 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8418 inst
.instruction
|= (msb
- 1) << 16;
8424 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8425 _("bit-field extends past end of register"));
8426 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8427 inst
.instruction
|= inst
.operands
[1].reg
;
8428 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8429 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8432 /* ARM V5 breakpoint instruction (argument parse)
8433 BKPT <16 bit unsigned immediate>
8434 Instruction is not conditional.
8435 The bit pattern given in insns[] has the COND_ALWAYS condition,
8436 and it is an error if the caller tried to override that. */
8441 /* Top 12 of 16 bits to bits 19:8. */
8442 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8444 /* Bottom 4 of 16 bits to bits 3:0. */
8445 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8449 encode_branch (int default_reloc
)
8451 if (inst
.operands
[0].hasreloc
)
8453 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8454 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8455 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8456 inst
.reloc
.type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8457 ? BFD_RELOC_ARM_PLT32
8458 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8461 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
8462 inst
.reloc
.pc_rel
= 1;
8469 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8470 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8473 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8480 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8482 if (inst
.cond
== COND_ALWAYS
)
8483 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8485 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8489 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8492 /* ARM V5 branch-link-exchange instruction (argument parse)
8493 BLX <target_addr> ie BLX(1)
8494 BLX{<condition>} <Rm> ie BLX(2)
8495 Unfortunately, there are two different opcodes for this mnemonic.
8496 So, the insns[].value is not used, and the code here zaps values
8497 into inst.instruction.
8498 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8503 if (inst
.operands
[0].isreg
)
8505 /* Arg is a register; the opcode provided by insns[] is correct.
8506 It is not illegal to do "blx pc", just useless. */
8507 if (inst
.operands
[0].reg
== REG_PC
)
8508 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8510 inst
.instruction
|= inst
.operands
[0].reg
;
8514 /* Arg is an address; this instruction cannot be executed
8515 conditionally, and the opcode must be adjusted.
8516 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8517 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8518 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8519 inst
.instruction
= 0xfa000000;
8520 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8527 bfd_boolean want_reloc
;
8529 if (inst
.operands
[0].reg
== REG_PC
)
8530 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8532 inst
.instruction
|= inst
.operands
[0].reg
;
8533 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8534 it is for ARMv4t or earlier. */
8535 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8536 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
8540 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8545 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
8549 /* ARM v5TEJ. Jump to Jazelle code. */
8554 if (inst
.operands
[0].reg
== REG_PC
)
8555 as_tsktsk (_("use of r15 in bxj is not really useful"));
8557 inst
.instruction
|= inst
.operands
[0].reg
;
8560 /* Co-processor data operation:
8561 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8562 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8566 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8567 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8568 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8569 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8570 inst
.instruction
|= inst
.operands
[4].reg
;
8571 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8577 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8578 encode_arm_shifter_operand (1);
8581 /* Transfer between coprocessor and ARM registers.
8582 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8587 No special properties. */
8589 struct deprecated_coproc_regs_s
8596 arm_feature_set deprecated
;
8597 arm_feature_set obsoleted
;
8598 const char *dep_msg
;
8599 const char *obs_msg
;
8602 #define DEPR_ACCESS_V8 \
8603 N_("This coprocessor register access is deprecated in ARMv8")
8605 /* Table of all deprecated coprocessor registers. */
8606 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8608 {15, 0, 7, 10, 5, /* CP15DMB. */
8609 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8610 DEPR_ACCESS_V8
, NULL
},
8611 {15, 0, 7, 10, 4, /* CP15DSB. */
8612 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8613 DEPR_ACCESS_V8
, NULL
},
8614 {15, 0, 7, 5, 4, /* CP15ISB. */
8615 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8616 DEPR_ACCESS_V8
, NULL
},
8617 {14, 6, 1, 0, 0, /* TEEHBR. */
8618 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8619 DEPR_ACCESS_V8
, NULL
},
8620 {14, 6, 0, 0, 0, /* TEECR. */
8621 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8622 DEPR_ACCESS_V8
, NULL
},
8625 #undef DEPR_ACCESS_V8
8627 static const size_t deprecated_coproc_reg_count
=
8628 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8636 Rd
= inst
.operands
[2].reg
;
8639 if (inst
.instruction
== 0xee000010
8640 || inst
.instruction
== 0xfe000010)
8642 reject_bad_reg (Rd
);
8645 constraint (Rd
== REG_SP
, BAD_SP
);
8650 if (inst
.instruction
== 0xe000010)
8651 constraint (Rd
== REG_PC
, BAD_PC
);
8654 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8656 const struct deprecated_coproc_regs_s
*r
=
8657 deprecated_coproc_regs
+ i
;
8659 if (inst
.operands
[0].reg
== r
->cp
8660 && inst
.operands
[1].imm
== r
->opc1
8661 && inst
.operands
[3].reg
== r
->crn
8662 && inst
.operands
[4].reg
== r
->crm
8663 && inst
.operands
[5].imm
== r
->opc2
)
8665 if (! ARM_CPU_IS_ANY (cpu_variant
)
8666 && warn_on_deprecated
8667 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8668 as_tsktsk ("%s", r
->dep_msg
);
8672 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8673 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8674 inst
.instruction
|= Rd
<< 12;
8675 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8676 inst
.instruction
|= inst
.operands
[4].reg
;
8677 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8680 /* Transfer between coprocessor register and pair of ARM registers.
8681 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8686 Two XScale instructions are special cases of these:
8688 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8689 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8691 Result unpredictable if Rd or Rn is R15. */
8698 Rd
= inst
.operands
[2].reg
;
8699 Rn
= inst
.operands
[3].reg
;
8703 reject_bad_reg (Rd
);
8704 reject_bad_reg (Rn
);
8708 constraint (Rd
== REG_PC
, BAD_PC
);
8709 constraint (Rn
== REG_PC
, BAD_PC
);
8712 /* Only check the MRRC{2} variants. */
8713 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
8715 /* If Rd == Rn, error that the operation is
8716 unpredictable (example MRRC p3,#1,r1,r1,c4). */
8717 constraint (Rd
== Rn
, BAD_OVERLAP
);
8720 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8721 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
8722 inst
.instruction
|= Rd
<< 12;
8723 inst
.instruction
|= Rn
<< 16;
8724 inst
.instruction
|= inst
.operands
[4].reg
;
8730 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8731 if (inst
.operands
[1].present
)
8733 inst
.instruction
|= CPSI_MMOD
;
8734 inst
.instruction
|= inst
.operands
[1].imm
;
8741 inst
.instruction
|= inst
.operands
[0].imm
;
8747 unsigned Rd
, Rn
, Rm
;
8749 Rd
= inst
.operands
[0].reg
;
8750 Rn
= (inst
.operands
[1].present
8751 ? inst
.operands
[1].reg
: Rd
);
8752 Rm
= inst
.operands
[2].reg
;
8754 constraint ((Rd
== REG_PC
), BAD_PC
);
8755 constraint ((Rn
== REG_PC
), BAD_PC
);
8756 constraint ((Rm
== REG_PC
), BAD_PC
);
8758 inst
.instruction
|= Rd
<< 16;
8759 inst
.instruction
|= Rn
<< 0;
8760 inst
.instruction
|= Rm
<< 8;
8766 /* There is no IT instruction in ARM mode. We
8767 process it to do the validation as if in
8768 thumb mode, just in case the code gets
8769 assembled for thumb using the unified syntax. */
8774 set_it_insn_type (IT_INSN
);
8775 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
8776 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* The original used ffs(range)-1, which invoked undefined behavior
     (1 << -1) when RANGE was zero.  Scan explicitly instead: this also
     avoids the POSIX-only ffs().  An empty or negative mask, or a mask
     with any bit above r15 set, is not a single-register list.  */
  if (range <= 0)
    return -1;

  for (i = 0; i <= 15; i++)
    if (range == (1 << i))
      return i;

  return -1;
}
8790 encode_ldmstm(int from_push_pop_mnem
)
8792 int base_reg
= inst
.operands
[0].reg
;
8793 int range
= inst
.operands
[1].imm
;
8796 inst
.instruction
|= base_reg
<< 16;
8797 inst
.instruction
|= range
;
8799 if (inst
.operands
[1].writeback
)
8800 inst
.instruction
|= LDM_TYPE_2_OR_3
;
8802 if (inst
.operands
[0].writeback
)
8804 inst
.instruction
|= WRITE_BACK
;
8805 /* Check for unpredictable uses of writeback. */
8806 if (inst
.instruction
& LOAD_BIT
)
8808 /* Not allowed in LDM type 2. */
8809 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
8810 && ((range
& (1 << REG_PC
)) == 0))
8811 as_warn (_("writeback of base register is UNPREDICTABLE"));
8812 /* Only allowed if base reg not in list for other types. */
8813 else if (range
& (1 << base_reg
))
8814 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8818 /* Not allowed for type 2. */
8819 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
8820 as_warn (_("writeback of base register is UNPREDICTABLE"));
8821 /* Only allowed if base reg not in list, or first in list. */
8822 else if ((range
& (1 << base_reg
))
8823 && (range
& ((1 << base_reg
) - 1)))
8824 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8828 /* If PUSH/POP has only one register, then use the A2 encoding. */
8829 one_reg
= only_one_reg_in_list (range
);
8830 if (from_push_pop_mnem
&& one_reg
>= 0)
8832 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
8834 inst
.instruction
&= A_COND_MASK
;
8835 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
8836 inst
.instruction
|= one_reg
<< 12;
8843 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
8846 /* ARMv5TE load-consecutive (argument parse)
8855 constraint (inst
.operands
[0].reg
% 2 != 0,
8856 _("first transfer register must be even"));
8857 constraint (inst
.operands
[1].present
8858 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8859 _("can only transfer two consecutive registers"));
8860 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8861 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
8863 if (!inst
.operands
[1].present
)
8864 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8866 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8867 register and the first register written; we have to diagnose
8868 overlap between the base and the second register written here. */
8870 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
8871 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
8872 as_warn (_("base register written back, and overlaps "
8873 "second transfer register"));
8875 if (!(inst
.instruction
& V4_STR_BIT
))
8877 /* For an index-register load, the index register must not overlap the
8878 destination (even if not write-back). */
8879 if (inst
.operands
[2].immisreg
8880 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
8881 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
8882 as_warn (_("index register overlaps transfer register"));
8884 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8885 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
8891 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8892 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8893 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8894 || inst
.operands
[1].negative
8895 /* This can arise if the programmer has written
8897 or if they have mistakenly used a register name as the last
8900 It is very difficult to distinguish between these two cases
8901 because "rX" might actually be a label. ie the register
8902 name has been occluded by a symbol of the same name. So we
8903 just generate a general 'bad addressing mode' type error
8904 message and leave it up to the programmer to discover the
8905 true cause and fix their mistake. */
8906 || (inst
.operands
[1].reg
== REG_PC
),
8909 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8910 || inst
.reloc
.exp
.X_add_number
!= 0,
8911 _("offset must be zero in ARM encoding"));
8913 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
8915 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8916 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8917 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8923 constraint (inst
.operands
[0].reg
% 2 != 0,
8924 _("even register required"));
8925 constraint (inst
.operands
[1].present
8926 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8927 _("can only load two consecutive registers"));
8928 /* If op 1 were present and equal to PC, this function wouldn't
8929 have been called in the first place. */
8930 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8932 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8933 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8936 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8937 which is not a multiple of four is UNPREDICTABLE. */
8939 check_ldr_r15_aligned (void)
8941 constraint (!(inst
.operands
[1].immisreg
)
8942 && (inst
.operands
[0].reg
== REG_PC
8943 && inst
.operands
[1].reg
== REG_PC
8944 && (inst
.reloc
.exp
.X_add_number
& 0x3)),
8945 _("ldr to register 15 must be 4-byte alligned"));
8951 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8952 if (!inst
.operands
[1].isreg
)
8953 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
8955 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
8956 check_ldr_r15_aligned ();
8962 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8964 if (inst
.operands
[1].preind
)
8966 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8967 || inst
.reloc
.exp
.X_add_number
!= 0,
8968 _("this instruction requires a post-indexed address"));
8970 inst
.operands
[1].preind
= 0;
8971 inst
.operands
[1].postind
= 1;
8972 inst
.operands
[1].writeback
= 1;
8974 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8975 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
8978 /* Halfword and signed-byte load/store operations. */
8983 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
8984 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8985 if (!inst
.operands
[1].isreg
)
8986 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
8988 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
8994 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8996 if (inst
.operands
[1].preind
)
8998 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8999 || inst
.reloc
.exp
.X_add_number
!= 0,
9000 _("this instruction requires a post-indexed address"));
9002 inst
.operands
[1].preind
= 0;
9003 inst
.operands
[1].postind
= 1;
9004 inst
.operands
[1].writeback
= 1;
9006 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9007 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9010 /* Co-processor register load/store.
9011 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9015 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9016 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9017 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9023 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9024 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9025 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9026 && !(inst
.instruction
& 0x00400000))
9027 as_tsktsk (_("Rd and Rm should be different in mla"));
9029 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9030 inst
.instruction
|= inst
.operands
[1].reg
;
9031 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9032 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9038 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9039 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9041 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9042 encode_arm_shifter_operand (1);
9045 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9052 top
= (inst
.instruction
& 0x00400000) != 0;
9053 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
9054 _(":lower16: not allowed this instruction"));
9055 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
9056 _(":upper16: not allowed instruction"));
9057 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9058 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
9060 imm
= inst
.reloc
.exp
.X_add_number
;
9061 /* The value is in two pieces: 0:11, 16:19. */
9062 inst
.instruction
|= (imm
& 0x00000fff);
9063 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9068 do_vfp_nsyn_mrs (void)
9070 if (inst
.operands
[0].isvec
)
9072 if (inst
.operands
[1].reg
!= 1)
9073 first_error (_("operand 1 must be FPSCR"));
9074 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9075 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9076 do_vfp_nsyn_opcode ("fmstat");
9078 else if (inst
.operands
[1].isvec
)
9079 do_vfp_nsyn_opcode ("fmrx");
9087 do_vfp_nsyn_msr (void)
9089 if (inst
.operands
[0].isvec
)
9090 do_vfp_nsyn_opcode ("fmxr");
9100 unsigned Rt
= inst
.operands
[0].reg
;
9102 if (thumb_mode
&& Rt
== REG_SP
)
9104 inst
.error
= BAD_SP
;
9108 /* APSR_ sets isvec. All other refs to PC are illegal. */
9109 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9111 inst
.error
= BAD_PC
;
9115 /* If we get through parsing the register name, we just insert the number
9116 generated into the instruction without further validation. */
9117 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9118 inst
.instruction
|= (Rt
<< 12);
9124 unsigned Rt
= inst
.operands
[1].reg
;
9127 reject_bad_reg (Rt
);
9128 else if (Rt
== REG_PC
)
9130 inst
.error
= BAD_PC
;
9134 /* If we get through parsing the register name, we just insert the number
9135 generated into the instruction without further validation. */
9136 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9137 inst
.instruction
|= (Rt
<< 12);
9145 if (do_vfp_nsyn_mrs () == SUCCESS
)
9148 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9149 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9151 if (inst
.operands
[1].isreg
)
9153 br
= inst
.operands
[1].reg
;
9154 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf000))
9155 as_bad (_("bad register for mrs"));
9159 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9160 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9162 _("'APSR', 'CPSR' or 'SPSR' expected"));
9163 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9166 inst
.instruction
|= br
;
9169 /* Two possible forms:
9170 "{C|S}PSR_<field>, Rm",
9171 "{C|S}PSR_f, #expression". */
9176 if (do_vfp_nsyn_msr () == SUCCESS
)
9179 inst
.instruction
|= inst
.operands
[0].imm
;
9180 if (inst
.operands
[1].isreg
)
9181 inst
.instruction
|= inst
.operands
[1].reg
;
9184 inst
.instruction
|= INST_IMMEDIATE
;
9185 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
9186 inst
.reloc
.pc_rel
= 0;
9193 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9195 if (!inst
.operands
[2].present
)
9196 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9197 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9198 inst
.instruction
|= inst
.operands
[1].reg
;
9199 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9201 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9202 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9203 as_tsktsk (_("Rd and Rm should be different in mul"));
9206 /* Long Multiply Parser
9207 UMULL RdLo, RdHi, Rm, Rs
9208 SMULL RdLo, RdHi, Rm, Rs
9209 UMLAL RdLo, RdHi, Rm, Rs
9210 SMLAL RdLo, RdHi, Rm, Rs. */
9215 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9216 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9217 inst
.instruction
|= inst
.operands
[2].reg
;
9218 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9220 /* rdhi and rdlo must be different. */
9221 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9222 as_tsktsk (_("rdhi and rdlo must be different"));
9224 /* rdhi, rdlo and rm must all be different before armv6. */
9225 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9226 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9227 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9228 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9234 if (inst
.operands
[0].present
9235 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9237 /* Architectural NOP hints are CPSR sets with no bits selected. */
9238 inst
.instruction
&= 0xf0000000;
9239 inst
.instruction
|= 0x0320f000;
9240 if (inst
.operands
[0].present
)
9241 inst
.instruction
|= inst
.operands
[0].imm
;
9245 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9246 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9247 Condition defaults to COND_ALWAYS.
9248 Error if Rd, Rn or Rm are R15. */
9253 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9254 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9255 inst
.instruction
|= inst
.operands
[2].reg
;
9256 if (inst
.operands
[3].present
)
9257 encode_arm_shift (3);
9260 /* ARM V6 PKHTB (Argument Parse). */
9265 if (!inst
.operands
[3].present
)
9267 /* If the shift specifier is omitted, turn the instruction
9268 into pkhbt rd, rm, rn. */
9269 inst
.instruction
&= 0xfff00010;
9270 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9271 inst
.instruction
|= inst
.operands
[1].reg
;
9272 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9276 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9277 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9278 inst
.instruction
|= inst
.operands
[2].reg
;
9279 encode_arm_shift (3);
9283 /* ARMv5TE: Preload-Cache
9284 MP Extensions: Preload for write
9288 Syntactically, like LDR with B=1, W=0, L=1. */
9293 constraint (!inst
.operands
[0].isreg
,
9294 _("'[' expected after PLD mnemonic"));
9295 constraint (inst
.operands
[0].postind
,
9296 _("post-indexed expression used in preload instruction"));
9297 constraint (inst
.operands
[0].writeback
,
9298 _("writeback used in preload instruction"));
9299 constraint (!inst
.operands
[0].preind
,
9300 _("unindexed addressing used in preload instruction"));
9301 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9304 /* ARMv7: PLI <addr_mode> */
9308 constraint (!inst
.operands
[0].isreg
,
9309 _("'[' expected after PLI mnemonic"));
9310 constraint (inst
.operands
[0].postind
,
9311 _("post-indexed expression used in preload instruction"));
9312 constraint (inst
.operands
[0].writeback
,
9313 _("writeback used in preload instruction"));
9314 constraint (!inst
.operands
[0].preind
,
9315 _("unindexed addressing used in preload instruction"));
9316 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9317 inst
.instruction
&= ~PRE_INDEX
;
9323 constraint (inst
.operands
[0].writeback
,
9324 _("push/pop do not support {reglist}^"));
9325 inst
.operands
[1] = inst
.operands
[0];
9326 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9327 inst
.operands
[0].isreg
= 1;
9328 inst
.operands
[0].writeback
= 1;
9329 inst
.operands
[0].reg
= REG_SP
;
9330 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9333 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9334 word at the specified address and the following word
9336 Unconditionally executed.
9337 Error if Rn is R15. */
9342 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9343 if (inst
.operands
[0].writeback
)
9344 inst
.instruction
|= WRITE_BACK
;
9347 /* ARM V6 ssat (argument parse). */
9352 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9353 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9354 inst
.instruction
|= inst
.operands
[2].reg
;
9356 if (inst
.operands
[3].present
)
9357 encode_arm_shift (3);
9360 /* ARM V6 usat (argument parse). */
9365 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9366 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9367 inst
.instruction
|= inst
.operands
[2].reg
;
9369 if (inst
.operands
[3].present
)
9370 encode_arm_shift (3);
9373 /* ARM V6 ssat16 (argument parse). */
9378 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9379 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9380 inst
.instruction
|= inst
.operands
[2].reg
;
9386 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9387 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9388 inst
.instruction
|= inst
.operands
[2].reg
;
9391 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9392 preserving the other bits.
9394 setend <endian_specifier>, where <endian_specifier> is either
9400 if (warn_on_deprecated
9401 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9402 as_tsktsk (_("setend use is deprecated for ARMv8"));
9404 if (inst
.operands
[0].imm
)
9405 inst
.instruction
|= 0x200;
9411 unsigned int Rm
= (inst
.operands
[1].present
9412 ? inst
.operands
[1].reg
9413 : inst
.operands
[0].reg
);
9415 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9416 inst
.instruction
|= Rm
;
9417 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9419 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9420 inst
.instruction
|= SHIFT_BY_REG
;
9421 /* PR 12854: Error on extraneous shifts. */
9422 constraint (inst
.operands
[2].shifted
,
9423 _("extraneous shift as part of operand to shift insn"));
9426 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
9432 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
9433 inst
.reloc
.pc_rel
= 0;
9439 inst
.reloc
.type
= BFD_RELOC_ARM_HVC
;
9440 inst
.reloc
.pc_rel
= 0;
9446 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9447 inst
.reloc
.pc_rel
= 0;
9453 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9454 _("selected processor does not support SETPAN instruction"));
9456 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9462 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9463 _("selected processor does not support SETPAN instruction"));
9465 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9468 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9469 SMLAxy{cond} Rd,Rm,Rs,Rn
9470 SMLAWy{cond} Rd,Rm,Rs,Rn
9471 Error if any register is R15. */
9476 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9477 inst
.instruction
|= inst
.operands
[1].reg
;
9478 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9479 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9482 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9483 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9484 Error if any register is R15.
9485 Warning if Rdlo == Rdhi. */
9490 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9491 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9492 inst
.instruction
|= inst
.operands
[2].reg
;
9493 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9495 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9496 as_tsktsk (_("rdhi and rdlo must be different"));
9499 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9500 SMULxy{cond} Rd,Rm,Rs
9501 Error if any register is R15. */
9506 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9507 inst
.instruction
|= inst
.operands
[1].reg
;
9508 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9511 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9512 the same for both ARM and Thumb-2. */
9519 if (inst
.operands
[0].present
)
9521 reg
= inst
.operands
[0].reg
;
9522 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9527 inst
.instruction
|= reg
<< 16;
9528 inst
.instruction
|= inst
.operands
[1].imm
;
9529 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9530 inst
.instruction
|= WRITE_BACK
;
9533 /* ARM V6 strex (argument parse). */
9538 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9539 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9540 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9541 || inst
.operands
[2].negative
9542 /* See comment in do_ldrex(). */
9543 || (inst
.operands
[2].reg
== REG_PC
),
9546 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9547 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9549 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9550 || inst
.reloc
.exp
.X_add_number
!= 0,
9551 _("offset must be zero in ARM encoding"));
9553 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9554 inst
.instruction
|= inst
.operands
[1].reg
;
9555 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9556 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9562 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9563 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9564 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9565 || inst
.operands
[2].negative
,
9568 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9569 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9577 constraint (inst
.operands
[1].reg
% 2 != 0,
9578 _("even register required"));
9579 constraint (inst
.operands
[2].present
9580 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9581 _("can only store two consecutive registers"));
9582 /* If op 2 were present and equal to PC, this function wouldn't
9583 have been called in the first place. */
9584 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9586 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9587 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9588 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9591 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9592 inst
.instruction
|= inst
.operands
[1].reg
;
9593 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9600 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9601 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9609 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9610 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9615 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9616 extends it to 32-bits, and adds the result to a value in another
9617 register. You can specify a rotation by 0, 8, 16, or 24 bits
9618 before extracting the 16-bit value.
9619 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9620 Condition defaults to COND_ALWAYS.
9621 Error if any register uses R15. */
9626 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9627 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9628 inst
.instruction
|= inst
.operands
[2].reg
;
9629 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9634 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9635 Condition defaults to COND_ALWAYS.
9636 Error if any register uses R15. */
9641 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9642 inst
.instruction
|= inst
.operands
[1].reg
;
9643 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9646 /* VFP instructions. In a logical order: SP variant first, monad
9647 before dyad, arithmetic then move then load/store. */
9650 do_vfp_sp_monadic (void)
9652 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9653 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9657 do_vfp_sp_dyadic (void)
9659 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9660 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9661 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9665 do_vfp_sp_compare_z (void)
9667 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9671 do_vfp_dp_sp_cvt (void)
9673 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9674 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9678 do_vfp_sp_dp_cvt (void)
9680 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9681 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9685 do_vfp_reg_from_sp (void)
9687 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9688 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9692 do_vfp_reg2_from_sp2 (void)
9694 constraint (inst
.operands
[2].imm
!= 2,
9695 _("only two consecutive VFP SP registers allowed here"));
9696 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9697 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9698 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9702 do_vfp_sp_from_reg (void)
9704 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9705 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9709 do_vfp_sp2_from_reg2 (void)
9711 constraint (inst
.operands
[0].imm
!= 2,
9712 _("only two consecutive VFP SP registers allowed here"));
9713 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9714 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9715 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9719 do_vfp_sp_ldst (void)
9721 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9722 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9726 do_vfp_dp_ldst (void)
9728 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9729 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9734 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
9736 if (inst
.operands
[0].writeback
)
9737 inst
.instruction
|= WRITE_BACK
;
9739 constraint (ldstm_type
!= VFP_LDSTMIA
,
9740 _("this addressing mode requires base-register writeback"));
9741 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9742 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
9743 inst
.instruction
|= inst
.operands
[1].imm
;
9747 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
9751 if (inst
.operands
[0].writeback
)
9752 inst
.instruction
|= WRITE_BACK
;
9754 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
9755 _("this addressing mode requires base-register writeback"));
9757 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9758 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9760 count
= inst
.operands
[1].imm
<< 1;
9761 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
9764 inst
.instruction
|= count
;
9768 do_vfp_sp_ldstmia (void)
9770 vfp_sp_ldstm (VFP_LDSTMIA
);
9774 do_vfp_sp_ldstmdb (void)
9776 vfp_sp_ldstm (VFP_LDSTMDB
);
9780 do_vfp_dp_ldstmia (void)
9782 vfp_dp_ldstm (VFP_LDSTMIA
);
9786 do_vfp_dp_ldstmdb (void)
9788 vfp_dp_ldstm (VFP_LDSTMDB
);
9792 do_vfp_xp_ldstmia (void)
9794 vfp_dp_ldstm (VFP_LDSTMIAX
);
9798 do_vfp_xp_ldstmdb (void)
9800 vfp_dp_ldstm (VFP_LDSTMDBX
);
9804 do_vfp_dp_rd_rm (void)
9806 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9807 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9811 do_vfp_dp_rn_rd (void)
9813 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
9814 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9818 do_vfp_dp_rd_rn (void)
9820 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9821 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9825 do_vfp_dp_rd_rn_rm (void)
9827 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9828 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9829 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
9835 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9839 do_vfp_dp_rm_rd_rn (void)
9841 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
9842 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9843 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
9846 /* VFPv3 instructions. */
9848 do_vfp_sp_const (void)
9850 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9851 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9852 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9856 do_vfp_dp_const (void)
9858 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9859 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9860 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9864 vfp_conv (int srcsize
)
9866 int immbits
= srcsize
- inst
.operands
[1].imm
;
9868 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
9870 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9871 i.e. immbits must be in range 0 - 16. */
9872 inst
.error
= _("immediate value out of range, expected range [0, 16]");
9875 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
9877 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9878 i.e. immbits must be in range 0 - 31. */
9879 inst
.error
= _("immediate value out of range, expected range [1, 32]");
9883 inst
.instruction
|= (immbits
& 1) << 5;
9884 inst
.instruction
|= (immbits
>> 1);
9888 do_vfp_sp_conv_16 (void)
9890 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9895 do_vfp_dp_conv_16 (void)
9897 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9902 do_vfp_sp_conv_32 (void)
9904 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9909 do_vfp_dp_conv_32 (void)
9911 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9915 /* FPA instructions. Also in a logical order. */
9920 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9921 inst
.instruction
|= inst
.operands
[1].reg
;
9925 do_fpa_ldmstm (void)
9927 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9928 switch (inst
.operands
[1].imm
)
9930 case 1: inst
.instruction
|= CP_T_X
; break;
9931 case 2: inst
.instruction
|= CP_T_Y
; break;
9932 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
9937 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
9939 /* The instruction specified "ea" or "fd", so we can only accept
9940 [Rn]{!}. The instruction does not really support stacking or
9941 unstacking, so we have to emulate these by setting appropriate
9942 bits and offsets. */
9943 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9944 || inst
.reloc
.exp
.X_add_number
!= 0,
9945 _("this instruction does not support indexing"));
9947 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
9948 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
9950 if (!(inst
.instruction
& INDEX_UP
))
9951 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
9953 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
9955 inst
.operands
[2].preind
= 0;
9956 inst
.operands
[2].postind
= 1;
9960 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9963 /* iWMMXt instructions: strictly in alphabetical order. */
9966 do_iwmmxt_tandorc (void)
9968 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
9972 do_iwmmxt_textrc (void)
9974 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9975 inst
.instruction
|= inst
.operands
[1].imm
;
9979 do_iwmmxt_textrm (void)
9981 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9982 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9983 inst
.instruction
|= inst
.operands
[2].imm
;
9987 do_iwmmxt_tinsr (void)
9989 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9990 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9991 inst
.instruction
|= inst
.operands
[2].imm
;
9995 do_iwmmxt_tmia (void)
9997 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
9998 inst
.instruction
|= inst
.operands
[1].reg
;
9999 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10003 do_iwmmxt_waligni (void)
10005 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10006 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10007 inst
.instruction
|= inst
.operands
[2].reg
;
10008 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
10012 do_iwmmxt_wmerge (void)
10014 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10015 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10016 inst
.instruction
|= inst
.operands
[2].reg
;
10017 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
10021 do_iwmmxt_wmov (void)
10023 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10024 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10025 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10026 inst
.instruction
|= inst
.operands
[1].reg
;
10030 do_iwmmxt_wldstbh (void)
10033 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10035 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
10037 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
10038 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
10042 do_iwmmxt_wldstw (void)
10044 /* RIWR_RIWC clears .isreg for a control register. */
10045 if (!inst
.operands
[0].isreg
)
10047 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
10048 inst
.instruction
|= 0xf0000000;
10051 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10052 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
10056 do_iwmmxt_wldstd (void)
10058 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10059 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
10060 && inst
.operands
[1].immisreg
)
10062 inst
.instruction
&= ~0x1a000ff;
10063 inst
.instruction
|= (0xfU
<< 28);
10064 if (inst
.operands
[1].preind
)
10065 inst
.instruction
|= PRE_INDEX
;
10066 if (!inst
.operands
[1].negative
)
10067 inst
.instruction
|= INDEX_UP
;
10068 if (inst
.operands
[1].writeback
)
10069 inst
.instruction
|= WRITE_BACK
;
10070 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10071 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10072 inst
.instruction
|= inst
.operands
[1].imm
;
10075 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10079 do_iwmmxt_wshufh (void)
10081 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10082 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10083 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10084 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10088 do_iwmmxt_wzero (void)
10090 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10091 inst
.instruction
|= inst
.operands
[0].reg
;
10092 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10093 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10097 do_iwmmxt_wrwrwr_or_imm5 (void)
10099 if (inst
.operands
[2].isreg
)
10102 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10103 _("immediate operand requires iWMMXt2"));
10105 if (inst
.operands
[2].imm
== 0)
10107 switch ((inst
.instruction
>> 20) & 0xf)
10113 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10114 inst
.operands
[2].imm
= 16;
10115 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10121 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10122 inst
.operands
[2].imm
= 32;
10123 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10130 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10132 wrn
= (inst
.instruction
>> 16) & 0xf;
10133 inst
.instruction
&= 0xff0fff0f;
10134 inst
.instruction
|= wrn
;
10135 /* Bail out here; the instruction is now assembled. */
10140 /* Map 32 -> 0, etc. */
10141 inst
.operands
[2].imm
&= 0x1f;
10142 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10146 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10147 operations first, then control, shift, and load/store. */
10149 /* Insns like "foo X,Y,Z". */
10152 do_mav_triple (void)
10154 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10155 inst
.instruction
|= inst
.operands
[1].reg
;
10156 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10159 /* Insns like "foo W,X,Y,Z".
10160 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10165 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10166 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10167 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10168 inst
.instruction
|= inst
.operands
[3].reg
;
10171 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10173 do_mav_dspsc (void)
10175 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10178 /* Maverick shift immediate instructions.
10179 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10180 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10183 do_mav_shift (void)
10185 int imm
= inst
.operands
[2].imm
;
10187 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10188 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10190 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10191 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10192 Bit 4 should be 0. */
10193 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10195 inst
.instruction
|= imm
;
10198 /* XScale instructions. Also sorted arithmetic before move. */
10200 /* Xscale multiply-accumulate (argument parse)
10203 MIAxycc acc0,Rm,Rs. */
10208 inst
.instruction
|= inst
.operands
[1].reg
;
10209 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10212 /* Xscale move-accumulator-register (argument parse)
10214 MARcc acc0,RdLo,RdHi. */
10219 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10220 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10223 /* Xscale move-register-accumulator (argument parse)
10225 MRAcc RdLo,RdHi,acc0. */
10230 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10231 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10232 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10235 /* Encoding functions relevant only to Thumb. */
10237 /* inst.operands[i] is a shifted-register operand; encode
10238 it into inst.instruction in the format used by Thumb32. */
10241 encode_thumb32_shifted_operand (int i
)
10243 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10244 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10246 constraint (inst
.operands
[i
].immisreg
,
10247 _("shift by register not allowed in thumb mode"));
10248 inst
.instruction
|= inst
.operands
[i
].reg
;
10249 if (shift
== SHIFT_RRX
)
10250 inst
.instruction
|= SHIFT_ROR
<< 4;
10253 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10254 _("expression too complex"));
10256 constraint (value
> 32
10257 || (value
== 32 && (shift
== SHIFT_LSL
10258 || shift
== SHIFT_ROR
)),
10259 _("shift expression is too large"));
10263 else if (value
== 32)
10266 inst
.instruction
|= shift
<< 4;
10267 inst
.instruction
|= (value
& 0x1c) << 10;
10268 inst
.instruction
|= (value
& 0x03) << 6;
10273 /* inst.operands[i] was set up by parse_address. Encode it into a
10274 Thumb32 format load or store instruction. Reject forms that cannot
10275 be used with such instructions. If is_t is true, reject forms that
10276 cannot be used with a T instruction; if is_d is true, reject forms
10277 that cannot be used with a D instruction. If it is a store insn,
10278 reject PC in Rn. */
10281 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10283 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10285 constraint (!inst
.operands
[i
].isreg
,
10286 _("Instruction does not support =N addresses"));
10288 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10289 if (inst
.operands
[i
].immisreg
)
10291 constraint (is_pc
, BAD_PC_ADDRESSING
);
10292 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10293 constraint (inst
.operands
[i
].negative
,
10294 _("Thumb does not support negative register indexing"));
10295 constraint (inst
.operands
[i
].postind
,
10296 _("Thumb does not support register post-indexing"));
10297 constraint (inst
.operands
[i
].writeback
,
10298 _("Thumb does not support register indexing with writeback"));
10299 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10300 _("Thumb supports only LSL in shifted register indexing"));
10302 inst
.instruction
|= inst
.operands
[i
].imm
;
10303 if (inst
.operands
[i
].shifted
)
10305 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10306 _("expression too complex"));
10307 constraint (inst
.reloc
.exp
.X_add_number
< 0
10308 || inst
.reloc
.exp
.X_add_number
> 3,
10309 _("shift out of range"));
10310 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10312 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10314 else if (inst
.operands
[i
].preind
)
10316 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10317 constraint (is_t
&& inst
.operands
[i
].writeback
,
10318 _("cannot use writeback with this instruction"));
10319 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10320 BAD_PC_ADDRESSING
);
10324 inst
.instruction
|= 0x01000000;
10325 if (inst
.operands
[i
].writeback
)
10326 inst
.instruction
|= 0x00200000;
10330 inst
.instruction
|= 0x00000c00;
10331 if (inst
.operands
[i
].writeback
)
10332 inst
.instruction
|= 0x00000100;
10334 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10336 else if (inst
.operands
[i
].postind
)
10338 gas_assert (inst
.operands
[i
].writeback
);
10339 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10340 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10343 inst
.instruction
|= 0x00200000;
10345 inst
.instruction
|= 0x00000900;
10346 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10348 else /* unindexed - only for coprocessor */
10349 inst
.error
= _("instruction does not accept unindexed addressing");
10352 /* Table of Thumb instructions which exist in both 16- and 32-bit
10353 encodings (the latter only in post-V6T2 cores). The index is the
10354 value used in the insns table below. When there is more than one
10355 possible 16-bit encoding for the instruction, this table always
10357 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
10458 /* Thumb instruction encoders, in alphabetical order. */
10460 /* ADDW or SUBW. */
10463 do_t_add_sub_w (void)
10467 Rd
= inst
.operands
[0].reg
;
10468 Rn
= inst
.operands
[1].reg
;
10470 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10471 is the SP-{plus,minus}-immediate form of the instruction. */
10473 constraint (Rd
== REG_PC
, BAD_PC
);
10475 reject_bad_reg (Rd
);
10477 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10478 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10481 /* Parse an add or subtract instruction. We get here with inst.instruction
10482 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10485 do_t_add_sub (void)
10489 Rd
= inst
.operands
[0].reg
;
10490 Rs
= (inst
.operands
[1].present
10491 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10492 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10495 set_it_insn_type_last ();
10497 if (unified_syntax
)
10500 bfd_boolean narrow
;
10503 flags
= (inst
.instruction
== T_MNEM_adds
10504 || inst
.instruction
== T_MNEM_subs
);
10506 narrow
= !in_it_block ();
10508 narrow
= in_it_block ();
10509 if (!inst
.operands
[2].isreg
)
10513 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10515 add
= (inst
.instruction
== T_MNEM_add
10516 || inst
.instruction
== T_MNEM_adds
);
10518 if (inst
.size_req
!= 4)
10520 /* Attempt to use a narrow opcode, with relaxation if
10522 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10523 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10524 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10525 opcode
= T_MNEM_add_sp
;
10526 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10527 opcode
= T_MNEM_add_pc
;
10528 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10531 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10533 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10537 inst
.instruction
= THUMB_OP16(opcode
);
10538 inst
.instruction
|= (Rd
<< 4) | Rs
;
10539 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10540 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
10542 if (inst
.size_req
== 2)
10543 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10545 inst
.relax
= opcode
;
10549 constraint (inst
.size_req
== 2, BAD_HIREG
);
10551 if (inst
.size_req
== 4
10552 || (inst
.size_req
!= 2 && !opcode
))
10554 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10555 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
10556 THUMB1_RELOC_ONLY
);
10559 constraint (add
, BAD_PC
);
10560 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10561 _("only SUBS PC, LR, #const allowed"));
10562 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10563 _("expression too complex"));
10564 constraint (inst
.reloc
.exp
.X_add_number
< 0
10565 || inst
.reloc
.exp
.X_add_number
> 0xff,
10566 _("immediate value out of range"));
10567 inst
.instruction
= T2_SUBS_PC_LR
10568 | inst
.reloc
.exp
.X_add_number
;
10569 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10572 else if (Rs
== REG_PC
)
10574 /* Always use addw/subw. */
10575 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10576 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10580 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10581 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10584 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10586 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10588 inst
.instruction
|= Rd
<< 8;
10589 inst
.instruction
|= Rs
<< 16;
10594 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10595 unsigned int shift
= inst
.operands
[2].shift_kind
;
10597 Rn
= inst
.operands
[2].reg
;
10598 /* See if we can do this with a 16-bit instruction. */
10599 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10601 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10606 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10607 || inst
.instruction
== T_MNEM_add
)
10609 : T_OPCODE_SUB_R3
);
10610 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10614 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10616 /* Thumb-1 cores (except v6-M) require at least one high
10617 register in a narrow non flag setting add. */
10618 if (Rd
> 7 || Rn
> 7
10619 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10620 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10627 inst
.instruction
= T_OPCODE_ADD_HI
;
10628 inst
.instruction
|= (Rd
& 8) << 4;
10629 inst
.instruction
|= (Rd
& 7);
10630 inst
.instruction
|= Rn
<< 3;
10636 constraint (Rd
== REG_PC
, BAD_PC
);
10637 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10638 constraint (Rs
== REG_PC
, BAD_PC
);
10639 reject_bad_reg (Rn
);
10641 /* If we get here, it can't be done in 16 bits. */
10642 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10643 _("shift must be constant"));
10644 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10645 inst
.instruction
|= Rd
<< 8;
10646 inst
.instruction
|= Rs
<< 16;
10647 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10648 _("shift value over 3 not allowed in thumb mode"));
10649 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10650 _("only LSL shift allowed in thumb mode"));
10651 encode_thumb32_shifted_operand (2);
10656 constraint (inst
.instruction
== T_MNEM_adds
10657 || inst
.instruction
== T_MNEM_subs
,
10660 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10662 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10663 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10666 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10667 ? 0x0000 : 0x8000);
10668 inst
.instruction
|= (Rd
<< 4) | Rs
;
10669 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10673 Rn
= inst
.operands
[2].reg
;
10674 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10676 /* We now have Rd, Rs, and Rn set to registers. */
10677 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10679 /* Can't do this for SUB. */
10680 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10681 inst
.instruction
= T_OPCODE_ADD_HI
;
10682 inst
.instruction
|= (Rd
& 8) << 4;
10683 inst
.instruction
|= (Rd
& 7);
10685 inst
.instruction
|= Rn
<< 3;
10687 inst
.instruction
|= Rs
<< 3;
10689 constraint (1, _("dest must overlap one source register"));
10693 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10694 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
10695 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10705 Rd
= inst
.operands
[0].reg
;
10706 reject_bad_reg (Rd
);
10708 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
10710 /* Defer to section relaxation. */
10711 inst
.relax
= inst
.instruction
;
10712 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10713 inst
.instruction
|= Rd
<< 4;
10715 else if (unified_syntax
&& inst
.size_req
!= 2)
10717 /* Generate a 32-bit opcode. */
10718 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10719 inst
.instruction
|= Rd
<< 8;
10720 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
10721 inst
.reloc
.pc_rel
= 1;
10725 /* Generate a 16-bit opcode. */
10726 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10727 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10728 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
10729 inst
.reloc
.pc_rel
= 1;
10731 inst
.instruction
|= Rd
<< 4;
10735 /* Arithmetic instructions for which there is just one 16-bit
10736 instruction encoding, and it allows only two low registers.
10737 For maximal compatibility with ARM syntax, we allow three register
10738 operands even when Thumb-32 instructions are not available, as long
10739 as the first two are identical. For instance, both "sbc r0,r1" and
10740 "sbc r0,r0,r1" are allowed. */
10746 Rd
= inst
.operands
[0].reg
;
10747 Rs
= (inst
.operands
[1].present
10748 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10749 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10750 Rn
= inst
.operands
[2].reg
;
10752 reject_bad_reg (Rd
);
10753 reject_bad_reg (Rs
);
10754 if (inst
.operands
[2].isreg
)
10755 reject_bad_reg (Rn
);
10757 if (unified_syntax
)
10759 if (!inst
.operands
[2].isreg
)
10761 /* For an immediate, we always generate a 32-bit opcode;
10762 section relaxation will shrink it later if possible. */
10763 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10764 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10765 inst
.instruction
|= Rd
<< 8;
10766 inst
.instruction
|= Rs
<< 16;
10767 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10771 bfd_boolean narrow
;
10773 /* See if we can do this with a 16-bit instruction. */
10774 if (THUMB_SETS_FLAGS (inst
.instruction
))
10775 narrow
= !in_it_block ();
10777 narrow
= in_it_block ();
10779 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10781 if (inst
.operands
[2].shifted
)
10783 if (inst
.size_req
== 4)
10789 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10790 inst
.instruction
|= Rd
;
10791 inst
.instruction
|= Rn
<< 3;
10795 /* If we get here, it can't be done in 16 bits. */
10796 constraint (inst
.operands
[2].shifted
10797 && inst
.operands
[2].immisreg
,
10798 _("shift must be constant"));
10799 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10800 inst
.instruction
|= Rd
<< 8;
10801 inst
.instruction
|= Rs
<< 16;
10802 encode_thumb32_shifted_operand (2);
10807 /* On its face this is a lie - the instruction does set the
10808 flags. However, the only supported mnemonic in this mode
10809 says it doesn't. */
10810 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10812 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10813 _("unshifted register required"));
10814 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10815 constraint (Rd
!= Rs
,
10816 _("dest and source1 must be the same register"));
10818 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10819 inst
.instruction
|= Rd
;
10820 inst
.instruction
|= Rn
<< 3;
10824 /* Similarly, but for instructions where the arithmetic operation is
10825 commutative, so we can allow either of them to be different from
10826 the destination operand in a 16-bit instruction. For instance, all
10827 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10834 Rd
= inst
.operands
[0].reg
;
10835 Rs
= (inst
.operands
[1].present
10836 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10837 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10838 Rn
= inst
.operands
[2].reg
;
10840 reject_bad_reg (Rd
);
10841 reject_bad_reg (Rs
);
10842 if (inst
.operands
[2].isreg
)
10843 reject_bad_reg (Rn
);
10845 if (unified_syntax
)
10847 if (!inst
.operands
[2].isreg
)
10849 /* For an immediate, we always generate a 32-bit opcode;
10850 section relaxation will shrink it later if possible. */
10851 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10852 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10853 inst
.instruction
|= Rd
<< 8;
10854 inst
.instruction
|= Rs
<< 16;
10855 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10859 bfd_boolean narrow
;
10861 /* See if we can do this with a 16-bit instruction. */
10862 if (THUMB_SETS_FLAGS (inst
.instruction
))
10863 narrow
= !in_it_block ();
10865 narrow
= in_it_block ();
10867 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10869 if (inst
.operands
[2].shifted
)
10871 if (inst
.size_req
== 4)
10878 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10879 inst
.instruction
|= Rd
;
10880 inst
.instruction
|= Rn
<< 3;
10885 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10886 inst
.instruction
|= Rd
;
10887 inst
.instruction
|= Rs
<< 3;
10892 /* If we get here, it can't be done in 16 bits. */
10893 constraint (inst
.operands
[2].shifted
10894 && inst
.operands
[2].immisreg
,
10895 _("shift must be constant"));
10896 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10897 inst
.instruction
|= Rd
<< 8;
10898 inst
.instruction
|= Rs
<< 16;
10899 encode_thumb32_shifted_operand (2);
10904 /* On its face this is a lie - the instruction does set the
10905 flags. However, the only supported mnemonic in this mode
10906 says it doesn't. */
10907 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10909 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10910 _("unshifted register required"));
10911 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10913 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10914 inst
.instruction
|= Rd
;
10917 inst
.instruction
|= Rn
<< 3;
10919 inst
.instruction
|= Rs
<< 3;
10921 constraint (1, _("dest must overlap one source register"));
10929 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
10930 constraint (msb
> 32, _("bit-field extends past end of register"));
10931 /* The instruction encoding stores the LSB and MSB,
10932 not the LSB and width. */
10933 Rd
= inst
.operands
[0].reg
;
10934 reject_bad_reg (Rd
);
10935 inst
.instruction
|= Rd
<< 8;
10936 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
10937 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
10938 inst
.instruction
|= msb
- 1;
10947 Rd
= inst
.operands
[0].reg
;
10948 reject_bad_reg (Rd
);
10950 /* #0 in second position is alternative syntax for bfc, which is
10951 the same instruction but with REG_PC in the Rm field. */
10952 if (!inst
.operands
[1].isreg
)
10956 Rn
= inst
.operands
[1].reg
;
10957 reject_bad_reg (Rn
);
10960 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
10961 constraint (msb
> 32, _("bit-field extends past end of register"));
10962 /* The instruction encoding stores the LSB and MSB,
10963 not the LSB and width. */
10964 inst
.instruction
|= Rd
<< 8;
10965 inst
.instruction
|= Rn
<< 16;
10966 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10967 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10968 inst
.instruction
|= msb
- 1;
10976 Rd
= inst
.operands
[0].reg
;
10977 Rn
= inst
.operands
[1].reg
;
10979 reject_bad_reg (Rd
);
10980 reject_bad_reg (Rn
);
10982 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
10983 _("bit-field extends past end of register"));
10984 inst
.instruction
|= Rd
<< 8;
10985 inst
.instruction
|= Rn
<< 16;
10986 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10987 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10988 inst
.instruction
|= inst
.operands
[3].imm
- 1;
10991 /* ARM V5 Thumb BLX (argument parse)
10992 BLX <target_addr> which is BLX(1)
10993 BLX <Rm> which is BLX(2)
10994 Unfortunately, there are two different opcodes for this mnemonic.
10995 So, the insns[].value is not used, and the code here zaps values
10996 into inst.instruction.
10998 ??? How to take advantage of the additional two bits of displacement
10999 available in Thumb32 mode? Need new relocation? */
11004 set_it_insn_type_last ();
11006 if (inst
.operands
[0].isreg
)
11008 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11009 /* We have a register, so this is BLX(2). */
11010 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11014 /* No register. This must be BLX(1). */
11015 inst
.instruction
= 0xf000e800;
11016 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11025 bfd_reloc_code_real_type reloc
;
11028 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
11030 if (in_it_block ())
11032 /* Conditional branches inside IT blocks are encoded as unconditional
11034 cond
= COND_ALWAYS
;
11039 if (cond
!= COND_ALWAYS
)
11040 opcode
= T_MNEM_bcond
;
11042 opcode
= inst
.instruction
;
11045 && (inst
.size_req
== 4
11046 || (inst
.size_req
!= 2
11047 && (inst
.operands
[0].hasreloc
11048 || inst
.reloc
.exp
.X_op
== O_constant
))))
11050 inst
.instruction
= THUMB_OP32(opcode
);
11051 if (cond
== COND_ALWAYS
)
11052 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11055 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11056 _("selected architecture does not support "
11057 "wide conditional branch instruction"));
11059 gas_assert (cond
!= 0xF);
11060 inst
.instruction
|= cond
<< 22;
11061 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11066 inst
.instruction
= THUMB_OP16(opcode
);
11067 if (cond
== COND_ALWAYS
)
11068 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11071 inst
.instruction
|= cond
<< 8;
11072 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11074 /* Allow section relaxation. */
11075 if (unified_syntax
&& inst
.size_req
!= 2)
11076 inst
.relax
= opcode
;
11078 inst
.reloc
.type
= reloc
;
11079 inst
.reloc
.pc_rel
= 1;
11082 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11083 between the two is the maximum immediate allowed - which is passed in
11086 do_t_bkpt_hlt1 (int range
)
11088 constraint (inst
.cond
!= COND_ALWAYS
,
11089 _("instruction is always unconditional"));
11090 if (inst
.operands
[0].present
)
11092 constraint (inst
.operands
[0].imm
> range
,
11093 _("immediate value out of range"));
11094 inst
.instruction
|= inst
.operands
[0].imm
;
11097 set_it_insn_type (NEUTRAL_IT_INSN
);
11103 do_t_bkpt_hlt1 (63);
11109 do_t_bkpt_hlt1 (255);
11113 do_t_branch23 (void)
11115 set_it_insn_type_last ();
11116 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11118 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11119 this file. We used to simply ignore the PLT reloc type here --
11120 the branch encoding is now needed to deal with TLSCALL relocs.
11121 So if we see a PLT reloc now, put it back to how it used to be to
11122 keep the preexisting behaviour. */
11123 if (inst
.reloc
.type
== BFD_RELOC_ARM_PLT32
)
11124 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11126 #if defined(OBJ_COFF)
11127 /* If the destination of the branch is a defined symbol which does not have
11128 the THUMB_FUNC attribute, then we must be calling a function which has
11129 the (interfacearm) attribute. We look for the Thumb entry point to that
11130 function and change the branch to refer to that function instead. */
11131 if ( inst
.reloc
.exp
.X_op
== O_symbol
11132 && inst
.reloc
.exp
.X_add_symbol
!= NULL
11133 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
11134 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
11135 inst
.reloc
.exp
.X_add_symbol
=
11136 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
11143 set_it_insn_type_last ();
11144 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11145 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11146 should cause the alignment to be checked once it is known. This is
11147 because BX PC only works if the instruction is word aligned. */
11155 set_it_insn_type_last ();
11156 Rm
= inst
.operands
[0].reg
;
11157 reject_bad_reg (Rm
);
11158 inst
.instruction
|= Rm
<< 16;
11167 Rd
= inst
.operands
[0].reg
;
11168 Rm
= inst
.operands
[1].reg
;
11170 reject_bad_reg (Rd
);
11171 reject_bad_reg (Rm
);
11173 inst
.instruction
|= Rd
<< 8;
11174 inst
.instruction
|= Rm
<< 16;
11175 inst
.instruction
|= Rm
;
11181 set_it_insn_type (OUTSIDE_IT_INSN
);
11182 inst
.instruction
|= inst
.operands
[0].imm
;
11188 set_it_insn_type (OUTSIDE_IT_INSN
);
11190 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11191 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11193 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11194 inst
.instruction
= 0xf3af8000;
11195 inst
.instruction
|= imod
<< 9;
11196 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11197 if (inst
.operands
[1].present
)
11198 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11202 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11203 && (inst
.operands
[0].imm
& 4),
11204 _("selected processor does not support 'A' form "
11205 "of this instruction"));
11206 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11207 _("Thumb does not support the 2-argument "
11208 "form of this instruction"));
11209 inst
.instruction
|= inst
.operands
[0].imm
;
11213 /* THUMB CPY instruction (argument parse). */
11218 if (inst
.size_req
== 4)
11220 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11221 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11222 inst
.instruction
|= inst
.operands
[1].reg
;
11226 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11227 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11228 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11235 set_it_insn_type (OUTSIDE_IT_INSN
);
11236 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11237 inst
.instruction
|= inst
.operands
[0].reg
;
11238 inst
.reloc
.pc_rel
= 1;
11239 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11245 inst
.instruction
|= inst
.operands
[0].imm
;
11251 unsigned Rd
, Rn
, Rm
;
11253 Rd
= inst
.operands
[0].reg
;
11254 Rn
= (inst
.operands
[1].present
11255 ? inst
.operands
[1].reg
: Rd
);
11256 Rm
= inst
.operands
[2].reg
;
11258 reject_bad_reg (Rd
);
11259 reject_bad_reg (Rn
);
11260 reject_bad_reg (Rm
);
11262 inst
.instruction
|= Rd
<< 8;
11263 inst
.instruction
|= Rn
<< 16;
11264 inst
.instruction
|= Rm
;
11270 if (unified_syntax
&& inst
.size_req
== 4)
11271 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11273 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11279 unsigned int cond
= inst
.operands
[0].imm
;
11281 set_it_insn_type (IT_INSN
);
11282 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11284 now_it
.warn_deprecated
= FALSE
;
11286 /* If the condition is a negative condition, invert the mask. */
11287 if ((cond
& 0x1) == 0x0)
11289 unsigned int mask
= inst
.instruction
& 0x000f;
11291 if ((mask
& 0x7) == 0)
11293 /* No conversion needed. */
11294 now_it
.block_length
= 1;
11296 else if ((mask
& 0x3) == 0)
11299 now_it
.block_length
= 2;
11301 else if ((mask
& 0x1) == 0)
11304 now_it
.block_length
= 3;
11309 now_it
.block_length
= 4;
11312 inst
.instruction
&= 0xfff0;
11313 inst
.instruction
|= mask
;
11316 inst
.instruction
|= cond
<< 4;
11319 /* Helper function used for both push/pop and ldm/stm. */
11321 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
11325 load
= (inst
.instruction
& (1 << 20)) != 0;
11327 if (mask
& (1 << 13))
11328 inst
.error
= _("SP not allowed in register list");
11330 if ((mask
& (1 << base
)) != 0
11332 inst
.error
= _("having the base register in the register list when "
11333 "using write back is UNPREDICTABLE");
11337 if (mask
& (1 << 15))
11339 if (mask
& (1 << 14))
11340 inst
.error
= _("LR and PC should not both be in register list");
11342 set_it_insn_type_last ();
11347 if (mask
& (1 << 15))
11348 inst
.error
= _("PC not allowed in register list");
11351 if ((mask
& (mask
- 1)) == 0)
11353 /* Single register transfers implemented as str/ldr. */
11356 if (inst
.instruction
& (1 << 23))
11357 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11359 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11363 if (inst
.instruction
& (1 << 23))
11364 inst
.instruction
= 0x00800000; /* ia -> [base] */
11366 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11369 inst
.instruction
|= 0xf8400000;
11371 inst
.instruction
|= 0x00100000;
11373 mask
= ffs (mask
) - 1;
11376 else if (writeback
)
11377 inst
.instruction
|= WRITE_BACK
;
11379 inst
.instruction
|= mask
;
11380 inst
.instruction
|= base
<< 16;
11386 /* This really doesn't seem worth it. */
11387 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
11388 _("expression too complex"));
11389 constraint (inst
.operands
[1].writeback
,
11390 _("Thumb load/store multiple does not support {reglist}^"));
11392 if (unified_syntax
)
11394 bfd_boolean narrow
;
11398 /* See if we can use a 16-bit instruction. */
11399 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11400 && inst
.size_req
!= 4
11401 && !(inst
.operands
[1].imm
& ~0xff))
11403 mask
= 1 << inst
.operands
[0].reg
;
11405 if (inst
.operands
[0].reg
<= 7)
11407 if (inst
.instruction
== T_MNEM_stmia
11408 ? inst
.operands
[0].writeback
11409 : (inst
.operands
[0].writeback
11410 == !(inst
.operands
[1].imm
& mask
)))
11412 if (inst
.instruction
== T_MNEM_stmia
11413 && (inst
.operands
[1].imm
& mask
)
11414 && (inst
.operands
[1].imm
& (mask
- 1)))
11415 as_warn (_("value stored for r%d is UNKNOWN"),
11416 inst
.operands
[0].reg
);
11418 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11419 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11420 inst
.instruction
|= inst
.operands
[1].imm
;
11423 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11425 /* This means 1 register in reg list one of 3 situations:
11426 1. Instruction is stmia, but without writeback.
11427 2. lmdia without writeback, but with Rn not in
11429 3. ldmia with writeback, but with Rn in reglist.
11430 Case 3 is UNPREDICTABLE behaviour, so we handle
11431 case 1 and 2 which can be converted into a 16-bit
11432 str or ldr. The SP cases are handled below. */
11433 unsigned long opcode
;
11434 /* First, record an error for Case 3. */
11435 if (inst
.operands
[1].imm
& mask
11436 && inst
.operands
[0].writeback
)
11438 _("having the base register in the register list when "
11439 "using write back is UNPREDICTABLE");
11441 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11443 inst
.instruction
= THUMB_OP16 (opcode
);
11444 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11445 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11449 else if (inst
.operands
[0] .reg
== REG_SP
)
11451 if (inst
.operands
[0].writeback
)
11454 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11455 ? T_MNEM_push
: T_MNEM_pop
);
11456 inst
.instruction
|= inst
.operands
[1].imm
;
11459 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11462 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11463 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11464 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11472 if (inst
.instruction
< 0xffff)
11473 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11475 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
11476 inst
.operands
[0].writeback
);
11481 constraint (inst
.operands
[0].reg
> 7
11482 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11483 constraint (inst
.instruction
!= T_MNEM_ldmia
11484 && inst
.instruction
!= T_MNEM_stmia
,
11485 _("Thumb-2 instruction only valid in unified syntax"));
11486 if (inst
.instruction
== T_MNEM_stmia
)
11488 if (!inst
.operands
[0].writeback
)
11489 as_warn (_("this instruction will write back the base register"));
11490 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11491 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11492 as_warn (_("value stored for r%d is UNKNOWN"),
11493 inst
.operands
[0].reg
);
11497 if (!inst
.operands
[0].writeback
11498 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11499 as_warn (_("this instruction will write back the base register"));
11500 else if (inst
.operands
[0].writeback
11501 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11502 as_warn (_("this instruction will not write back the base register"));
11505 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11506 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11507 inst
.instruction
|= inst
.operands
[1].imm
;
11514 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11515 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11516 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11517 || inst
.operands
[1].negative
,
11520 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11522 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11523 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11524 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11530 if (!inst
.operands
[1].present
)
11532 constraint (inst
.operands
[0].reg
== REG_LR
,
11533 _("r14 not allowed as first register "
11534 "when second register is omitted"));
11535 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11537 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11540 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11541 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11542 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11548 unsigned long opcode
;
11551 if (inst
.operands
[0].isreg
11552 && !inst
.operands
[0].preind
11553 && inst
.operands
[0].reg
== REG_PC
)
11554 set_it_insn_type_last ();
11556 opcode
= inst
.instruction
;
11557 if (unified_syntax
)
11559 if (!inst
.operands
[1].isreg
)
11561 if (opcode
<= 0xffff)
11562 inst
.instruction
= THUMB_OP32 (opcode
);
11563 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11566 if (inst
.operands
[1].isreg
11567 && !inst
.operands
[1].writeback
11568 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11569 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11570 && opcode
<= 0xffff
11571 && inst
.size_req
!= 4)
11573 /* Insn may have a 16-bit form. */
11574 Rn
= inst
.operands
[1].reg
;
11575 if (inst
.operands
[1].immisreg
)
11577 inst
.instruction
= THUMB_OP16 (opcode
);
11579 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11581 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11582 reject_bad_reg (inst
.operands
[1].imm
);
11584 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11585 && opcode
!= T_MNEM_ldrsb
)
11586 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11587 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11594 if (inst
.reloc
.pc_rel
)
11595 opcode
= T_MNEM_ldr_pc2
;
11597 opcode
= T_MNEM_ldr_pc
;
11601 if (opcode
== T_MNEM_ldr
)
11602 opcode
= T_MNEM_ldr_sp
;
11604 opcode
= T_MNEM_str_sp
;
11606 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11610 inst
.instruction
= inst
.operands
[0].reg
;
11611 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11613 inst
.instruction
|= THUMB_OP16 (opcode
);
11614 if (inst
.size_req
== 2)
11615 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11617 inst
.relax
= opcode
;
11621 /* Definitely a 32-bit variant. */
11623 /* Warning for Erratum 752419. */
11624 if (opcode
== T_MNEM_ldr
11625 && inst
.operands
[0].reg
== REG_SP
11626 && inst
.operands
[1].writeback
== 1
11627 && !inst
.operands
[1].immisreg
)
11629 if (no_cpu_selected ()
11630 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11631 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11632 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11633 as_warn (_("This instruction may be unpredictable "
11634 "if executed on M-profile cores "
11635 "with interrupts enabled."));
11638 /* Do some validations regarding addressing modes. */
11639 if (inst
.operands
[1].immisreg
)
11640 reject_bad_reg (inst
.operands
[1].imm
);
11642 constraint (inst
.operands
[1].writeback
== 1
11643 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11646 inst
.instruction
= THUMB_OP32 (opcode
);
11647 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11648 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11649 check_ldr_r15_aligned ();
11653 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11655 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11657 /* Only [Rn,Rm] is acceptable. */
11658 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11659 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11660 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11661 || inst
.operands
[1].negative
,
11662 _("Thumb does not support this addressing mode"));
11663 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11667 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11668 if (!inst
.operands
[1].isreg
)
11669 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11672 constraint (!inst
.operands
[1].preind
11673 || inst
.operands
[1].shifted
11674 || inst
.operands
[1].writeback
,
11675 _("Thumb does not support this addressing mode"));
11676 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
11678 constraint (inst
.instruction
& 0x0600,
11679 _("byte or halfword not valid for base register"));
11680 constraint (inst
.operands
[1].reg
== REG_PC
11681 && !(inst
.instruction
& THUMB_LOAD_BIT
),
11682 _("r15 based store not allowed"));
11683 constraint (inst
.operands
[1].immisreg
,
11684 _("invalid base register for register offset"));
11686 if (inst
.operands
[1].reg
== REG_PC
)
11687 inst
.instruction
= T_OPCODE_LDR_PC
;
11688 else if (inst
.instruction
& THUMB_LOAD_BIT
)
11689 inst
.instruction
= T_OPCODE_LDR_SP
;
11691 inst
.instruction
= T_OPCODE_STR_SP
;
11693 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11694 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11698 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
11699 if (!inst
.operands
[1].immisreg
)
11701 /* Immediate offset. */
11702 inst
.instruction
|= inst
.operands
[0].reg
;
11703 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11704 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11708 /* Register offset. */
11709 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
11710 constraint (inst
.operands
[1].negative
,
11711 _("Thumb does not support this addressing mode"));
11714 switch (inst
.instruction
)
11716 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
11717 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
11718 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
11719 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
11720 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
11721 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
11722 case 0x5600 /* ldrsb */:
11723 case 0x5e00 /* ldrsh */: break;
11727 inst
.instruction
|= inst
.operands
[0].reg
;
11728 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11729 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
11735 if (!inst
.operands
[1].present
)
11737 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11738 constraint (inst
.operands
[0].reg
== REG_LR
,
11739 _("r14 not allowed here"));
11740 constraint (inst
.operands
[0].reg
== REG_R12
,
11741 _("r12 not allowed here"));
11744 if (inst
.operands
[2].writeback
11745 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
11746 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
11747 as_warn (_("base register written back, and overlaps "
11748 "one of transfer registers"));
11750 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11751 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11752 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
11758 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11759 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
11765 unsigned Rd
, Rn
, Rm
, Ra
;
11767 Rd
= inst
.operands
[0].reg
;
11768 Rn
= inst
.operands
[1].reg
;
11769 Rm
= inst
.operands
[2].reg
;
11770 Ra
= inst
.operands
[3].reg
;
11772 reject_bad_reg (Rd
);
11773 reject_bad_reg (Rn
);
11774 reject_bad_reg (Rm
);
11775 reject_bad_reg (Ra
);
11777 inst
.instruction
|= Rd
<< 8;
11778 inst
.instruction
|= Rn
<< 16;
11779 inst
.instruction
|= Rm
;
11780 inst
.instruction
|= Ra
<< 12;
11786 unsigned RdLo
, RdHi
, Rn
, Rm
;
11788 RdLo
= inst
.operands
[0].reg
;
11789 RdHi
= inst
.operands
[1].reg
;
11790 Rn
= inst
.operands
[2].reg
;
11791 Rm
= inst
.operands
[3].reg
;
11793 reject_bad_reg (RdLo
);
11794 reject_bad_reg (RdHi
);
11795 reject_bad_reg (Rn
);
11796 reject_bad_reg (Rm
);
11798 inst
.instruction
|= RdLo
<< 12;
11799 inst
.instruction
|= RdHi
<< 8;
11800 inst
.instruction
|= Rn
<< 16;
11801 inst
.instruction
|= Rm
;
11805 do_t_mov_cmp (void)
11809 Rn
= inst
.operands
[0].reg
;
11810 Rm
= inst
.operands
[1].reg
;
11813 set_it_insn_type_last ();
11815 if (unified_syntax
)
11817 int r0off
= (inst
.instruction
== T_MNEM_mov
11818 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
11819 unsigned long opcode
;
11820 bfd_boolean narrow
;
11821 bfd_boolean low_regs
;
11823 low_regs
= (Rn
<= 7 && Rm
<= 7);
11824 opcode
= inst
.instruction
;
11825 if (in_it_block ())
11826 narrow
= opcode
!= T_MNEM_movs
;
11828 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
11829 if (inst
.size_req
== 4
11830 || inst
.operands
[1].shifted
)
11833 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11834 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
11835 && !inst
.operands
[1].shifted
11839 inst
.instruction
= T2_SUBS_PC_LR
;
11843 if (opcode
== T_MNEM_cmp
)
11845 constraint (Rn
== REG_PC
, BAD_PC
);
11848 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11850 warn_deprecated_sp (Rm
);
11851 /* R15 was documented as a valid choice for Rm in ARMv6,
11852 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11853 tools reject R15, so we do too. */
11854 constraint (Rm
== REG_PC
, BAD_PC
);
11857 reject_bad_reg (Rm
);
11859 else if (opcode
== T_MNEM_mov
11860 || opcode
== T_MNEM_movs
)
11862 if (inst
.operands
[1].isreg
)
11864 if (opcode
== T_MNEM_movs
)
11866 reject_bad_reg (Rn
);
11867 reject_bad_reg (Rm
);
11871 /* This is mov.n. */
11872 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
11873 && (Rm
== REG_SP
|| Rm
== REG_PC
))
11875 as_tsktsk (_("Use of r%u as a source register is "
11876 "deprecated when r%u is the destination "
11877 "register."), Rm
, Rn
);
11882 /* This is mov.w. */
11883 constraint (Rn
== REG_PC
, BAD_PC
);
11884 constraint (Rm
== REG_PC
, BAD_PC
);
11885 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
11889 reject_bad_reg (Rn
);
11892 if (!inst
.operands
[1].isreg
)
11894 /* Immediate operand. */
11895 if (!in_it_block () && opcode
== T_MNEM_mov
)
11897 if (low_regs
&& narrow
)
11899 inst
.instruction
= THUMB_OP16 (opcode
);
11900 inst
.instruction
|= Rn
<< 8;
11901 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11902 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
11904 if (inst
.size_req
== 2)
11905 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11907 inst
.relax
= opcode
;
11912 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11913 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
11914 THUMB1_RELOC_ONLY
);
11916 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11917 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11918 inst
.instruction
|= Rn
<< r0off
;
11919 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11922 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
11923 && (inst
.instruction
== T_MNEM_mov
11924 || inst
.instruction
== T_MNEM_movs
))
11926 /* Register shifts are encoded as separate shift instructions. */
11927 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
11929 if (in_it_block ())
11934 if (inst
.size_req
== 4)
11937 if (!low_regs
|| inst
.operands
[1].imm
> 7)
11943 switch (inst
.operands
[1].shift_kind
)
11946 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
11949 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
11952 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
11955 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
11961 inst
.instruction
= opcode
;
11964 inst
.instruction
|= Rn
;
11965 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
11970 inst
.instruction
|= CONDS_BIT
;
11972 inst
.instruction
|= Rn
<< 8;
11973 inst
.instruction
|= Rm
<< 16;
11974 inst
.instruction
|= inst
.operands
[1].imm
;
11979 /* Some mov with immediate shift have narrow variants.
11980 Register shifts are handled above. */
11981 if (low_regs
&& inst
.operands
[1].shifted
11982 && (inst
.instruction
== T_MNEM_mov
11983 || inst
.instruction
== T_MNEM_movs
))
11985 if (in_it_block ())
11986 narrow
= (inst
.instruction
== T_MNEM_mov
);
11988 narrow
= (inst
.instruction
== T_MNEM_movs
);
11993 switch (inst
.operands
[1].shift_kind
)
11995 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11996 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11997 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11998 default: narrow
= FALSE
; break;
12004 inst
.instruction
|= Rn
;
12005 inst
.instruction
|= Rm
<< 3;
12006 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12010 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12011 inst
.instruction
|= Rn
<< r0off
;
12012 encode_thumb32_shifted_operand (1);
12016 switch (inst
.instruction
)
12019 /* In v4t or v5t a move of two lowregs produces unpredictable
12020 results. Don't allow this. */
12023 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12024 "MOV Rd, Rs with two low registers is not "
12025 "permitted on this architecture");
12026 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12030 inst
.instruction
= T_OPCODE_MOV_HR
;
12031 inst
.instruction
|= (Rn
& 0x8) << 4;
12032 inst
.instruction
|= (Rn
& 0x7);
12033 inst
.instruction
|= Rm
<< 3;
12037 /* We know we have low registers at this point.
12038 Generate LSLS Rd, Rs, #0. */
12039 inst
.instruction
= T_OPCODE_LSL_I
;
12040 inst
.instruction
|= Rn
;
12041 inst
.instruction
|= Rm
<< 3;
12047 inst
.instruction
= T_OPCODE_CMP_LR
;
12048 inst
.instruction
|= Rn
;
12049 inst
.instruction
|= Rm
<< 3;
12053 inst
.instruction
= T_OPCODE_CMP_HR
;
12054 inst
.instruction
|= (Rn
& 0x8) << 4;
12055 inst
.instruction
|= (Rn
& 0x7);
12056 inst
.instruction
|= Rm
<< 3;
12063 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12065 /* PR 10443: Do not silently ignore shifted operands. */
12066 constraint (inst
.operands
[1].shifted
,
12067 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12069 if (inst
.operands
[1].isreg
)
12071 if (Rn
< 8 && Rm
< 8)
12073 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12074 since a MOV instruction produces unpredictable results. */
12075 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12076 inst
.instruction
= T_OPCODE_ADD_I3
;
12078 inst
.instruction
= T_OPCODE_CMP_LR
;
12080 inst
.instruction
|= Rn
;
12081 inst
.instruction
|= Rm
<< 3;
12085 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12086 inst
.instruction
= T_OPCODE_MOV_HR
;
12088 inst
.instruction
= T_OPCODE_CMP_HR
;
12094 constraint (Rn
> 7,
12095 _("only lo regs allowed with immediate"));
12096 inst
.instruction
|= Rn
<< 8;
12097 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
12108 top
= (inst
.instruction
& 0x00800000) != 0;
12109 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
12111 constraint (top
, _(":lower16: not allowed this instruction"));
12112 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
12114 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
12116 constraint (!top
, _(":upper16: not allowed this instruction"));
12117 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
12120 Rd
= inst
.operands
[0].reg
;
12121 reject_bad_reg (Rd
);
12123 inst
.instruction
|= Rd
<< 8;
12124 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
12126 imm
= inst
.reloc
.exp
.X_add_number
;
12127 inst
.instruction
|= (imm
& 0xf000) << 4;
12128 inst
.instruction
|= (imm
& 0x0800) << 15;
12129 inst
.instruction
|= (imm
& 0x0700) << 4;
12130 inst
.instruction
|= (imm
& 0x00ff);
12135 do_t_mvn_tst (void)
12139 Rn
= inst
.operands
[0].reg
;
12140 Rm
= inst
.operands
[1].reg
;
12142 if (inst
.instruction
== T_MNEM_cmp
12143 || inst
.instruction
== T_MNEM_cmn
)
12144 constraint (Rn
== REG_PC
, BAD_PC
);
12146 reject_bad_reg (Rn
);
12147 reject_bad_reg (Rm
);
12149 if (unified_syntax
)
12151 int r0off
= (inst
.instruction
== T_MNEM_mvn
12152 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12153 bfd_boolean narrow
;
12155 if (inst
.size_req
== 4
12156 || inst
.instruction
> 0xffff
12157 || inst
.operands
[1].shifted
12158 || Rn
> 7 || Rm
> 7)
12160 else if (inst
.instruction
== T_MNEM_cmn
12161 || inst
.instruction
== T_MNEM_tst
)
12163 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12164 narrow
= !in_it_block ();
12166 narrow
= in_it_block ();
12168 if (!inst
.operands
[1].isreg
)
12170 /* For an immediate, we always generate a 32-bit opcode;
12171 section relaxation will shrink it later if possible. */
12172 if (inst
.instruction
< 0xffff)
12173 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12174 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12175 inst
.instruction
|= Rn
<< r0off
;
12176 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12180 /* See if we can do this with a 16-bit instruction. */
12183 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12184 inst
.instruction
|= Rn
;
12185 inst
.instruction
|= Rm
<< 3;
12189 constraint (inst
.operands
[1].shifted
12190 && inst
.operands
[1].immisreg
,
12191 _("shift must be constant"));
12192 if (inst
.instruction
< 0xffff)
12193 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12194 inst
.instruction
|= Rn
<< r0off
;
12195 encode_thumb32_shifted_operand (1);
12201 constraint (inst
.instruction
> 0xffff
12202 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12203 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12204 _("unshifted register required"));
12205 constraint (Rn
> 7 || Rm
> 7,
12208 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12209 inst
.instruction
|= Rn
;
12210 inst
.instruction
|= Rm
<< 3;
12219 if (do_vfp_nsyn_mrs () == SUCCESS
)
12222 Rd
= inst
.operands
[0].reg
;
12223 reject_bad_reg (Rd
);
12224 inst
.instruction
|= Rd
<< 8;
12226 if (inst
.operands
[1].isreg
)
12228 unsigned br
= inst
.operands
[1].reg
;
12229 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12230 as_bad (_("bad register for mrs"));
12232 inst
.instruction
|= br
& (0xf << 16);
12233 inst
.instruction
|= (br
& 0x300) >> 4;
12234 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12238 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12240 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12242 /* PR gas/12698: The constraint is only applied for m_profile.
12243 If the user has specified -march=all, we want to ignore it as
12244 we are building for any CPU type, including non-m variants. */
12245 bfd_boolean m_profile
=
12246 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12247 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12248 "not support requested special purpose register"));
12251 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12253 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12254 _("'APSR', 'CPSR' or 'SPSR' expected"));
12256 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12257 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12258 inst
.instruction
|= 0xf0000;
12268 if (do_vfp_nsyn_msr () == SUCCESS
)
12271 constraint (!inst
.operands
[1].isreg
,
12272 _("Thumb encoding does not support an immediate here"));
12274 if (inst
.operands
[0].isreg
)
12275 flags
= (int)(inst
.operands
[0].reg
);
12277 flags
= inst
.operands
[0].imm
;
12279 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12281 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12283 /* PR gas/12698: The constraint is only applied for m_profile.
12284 If the user has specified -march=all, we want to ignore it as
12285 we are building for any CPU type, including non-m variants. */
12286 bfd_boolean m_profile
=
12287 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12288 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12289 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12290 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12291 && bits
!= PSR_f
)) && m_profile
,
12292 _("selected processor does not support requested special "
12293 "purpose register"));
12296 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12297 "requested special purpose register"));
12299 Rn
= inst
.operands
[1].reg
;
12300 reject_bad_reg (Rn
);
12302 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12303 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12304 inst
.instruction
|= (flags
& 0x300) >> 4;
12305 inst
.instruction
|= (flags
& 0xff);
12306 inst
.instruction
|= Rn
<< 16;
12312 bfd_boolean narrow
;
12313 unsigned Rd
, Rn
, Rm
;
12315 if (!inst
.operands
[2].present
)
12316 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12318 Rd
= inst
.operands
[0].reg
;
12319 Rn
= inst
.operands
[1].reg
;
12320 Rm
= inst
.operands
[2].reg
;
12322 if (unified_syntax
)
12324 if (inst
.size_req
== 4
12330 else if (inst
.instruction
== T_MNEM_muls
)
12331 narrow
= !in_it_block ();
12333 narrow
= in_it_block ();
12337 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12338 constraint (Rn
> 7 || Rm
> 7,
12345 /* 16-bit MULS/Conditional MUL. */
12346 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12347 inst
.instruction
|= Rd
;
12350 inst
.instruction
|= Rm
<< 3;
12352 inst
.instruction
|= Rn
<< 3;
12354 constraint (1, _("dest must overlap one source register"));
12358 constraint (inst
.instruction
!= T_MNEM_mul
,
12359 _("Thumb-2 MUL must not set flags"));
12361 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12362 inst
.instruction
|= Rd
<< 8;
12363 inst
.instruction
|= Rn
<< 16;
12364 inst
.instruction
|= Rm
<< 0;
12366 reject_bad_reg (Rd
);
12367 reject_bad_reg (Rn
);
12368 reject_bad_reg (Rm
);
12375 unsigned RdLo
, RdHi
, Rn
, Rm
;
12377 RdLo
= inst
.operands
[0].reg
;
12378 RdHi
= inst
.operands
[1].reg
;
12379 Rn
= inst
.operands
[2].reg
;
12380 Rm
= inst
.operands
[3].reg
;
12382 reject_bad_reg (RdLo
);
12383 reject_bad_reg (RdHi
);
12384 reject_bad_reg (Rn
);
12385 reject_bad_reg (Rm
);
12387 inst
.instruction
|= RdLo
<< 12;
12388 inst
.instruction
|= RdHi
<< 8;
12389 inst
.instruction
|= Rn
<< 16;
12390 inst
.instruction
|= Rm
;
12393 as_tsktsk (_("rdhi and rdlo must be different"));
12399 set_it_insn_type (NEUTRAL_IT_INSN
);
12401 if (unified_syntax
)
12403 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12405 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12406 inst
.instruction
|= inst
.operands
[0].imm
;
12410 /* PR9722: Check for Thumb2 availability before
12411 generating a thumb2 nop instruction. */
12412 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12414 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12415 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12418 inst
.instruction
= 0x46c0;
12423 constraint (inst
.operands
[0].present
,
12424 _("Thumb does not support NOP with hints"));
12425 inst
.instruction
= 0x46c0;
12432 if (unified_syntax
)
12434 bfd_boolean narrow
;
12436 if (THUMB_SETS_FLAGS (inst
.instruction
))
12437 narrow
= !in_it_block ();
12439 narrow
= in_it_block ();
12440 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12442 if (inst
.size_req
== 4)
12447 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12448 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12449 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12453 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12454 inst
.instruction
|= inst
.operands
[0].reg
;
12455 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12460 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12462 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12464 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12465 inst
.instruction
|= inst
.operands
[0].reg
;
12466 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12475 Rd
= inst
.operands
[0].reg
;
12476 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12478 reject_bad_reg (Rd
);
12479 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12480 reject_bad_reg (Rn
);
12482 inst
.instruction
|= Rd
<< 8;
12483 inst
.instruction
|= Rn
<< 16;
12485 if (!inst
.operands
[2].isreg
)
12487 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12488 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12494 Rm
= inst
.operands
[2].reg
;
12495 reject_bad_reg (Rm
);
12497 constraint (inst
.operands
[2].shifted
12498 && inst
.operands
[2].immisreg
,
12499 _("shift must be constant"));
12500 encode_thumb32_shifted_operand (2);
12507 unsigned Rd
, Rn
, Rm
;
12509 Rd
= inst
.operands
[0].reg
;
12510 Rn
= inst
.operands
[1].reg
;
12511 Rm
= inst
.operands
[2].reg
;
12513 reject_bad_reg (Rd
);
12514 reject_bad_reg (Rn
);
12515 reject_bad_reg (Rm
);
12517 inst
.instruction
|= Rd
<< 8;
12518 inst
.instruction
|= Rn
<< 16;
12519 inst
.instruction
|= Rm
;
12520 if (inst
.operands
[3].present
)
12522 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
12523 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12524 _("expression too complex"));
12525 inst
.instruction
|= (val
& 0x1c) << 10;
12526 inst
.instruction
|= (val
& 0x03) << 6;
12533 if (!inst
.operands
[3].present
)
12537 inst
.instruction
&= ~0x00000020;
12539 /* PR 10168. Swap the Rm and Rn registers. */
12540 Rtmp
= inst
.operands
[1].reg
;
12541 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12542 inst
.operands
[2].reg
= Rtmp
;
12550 if (inst
.operands
[0].immisreg
)
12551 reject_bad_reg (inst
.operands
[0].imm
);
12553 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12557 do_t_push_pop (void)
12561 constraint (inst
.operands
[0].writeback
,
12562 _("push/pop do not support {reglist}^"));
12563 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
12564 _("expression too complex"));
12566 mask
= inst
.operands
[0].imm
;
12567 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12568 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12569 else if (inst
.size_req
!= 4
12570 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
12571 ? REG_LR
: REG_PC
)))
12573 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12574 inst
.instruction
|= THUMB_PP_PC_LR
;
12575 inst
.instruction
|= mask
& 0xff;
12577 else if (unified_syntax
)
12579 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12580 encode_thumb2_ldmstm (13, mask
, TRUE
);
12584 inst
.error
= _("invalid register list to push/pop instruction");
12594 Rd
= inst
.operands
[0].reg
;
12595 Rm
= inst
.operands
[1].reg
;
12597 reject_bad_reg (Rd
);
12598 reject_bad_reg (Rm
);
12600 inst
.instruction
|= Rd
<< 8;
12601 inst
.instruction
|= Rm
<< 16;
12602 inst
.instruction
|= Rm
;
12610 Rd
= inst
.operands
[0].reg
;
12611 Rm
= inst
.operands
[1].reg
;
12613 reject_bad_reg (Rd
);
12614 reject_bad_reg (Rm
);
12616 if (Rd
<= 7 && Rm
<= 7
12617 && inst
.size_req
!= 4)
12619 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12620 inst
.instruction
|= Rd
;
12621 inst
.instruction
|= Rm
<< 3;
12623 else if (unified_syntax
)
12625 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12626 inst
.instruction
|= Rd
<< 8;
12627 inst
.instruction
|= Rm
<< 16;
12628 inst
.instruction
|= Rm
;
12631 inst
.error
= BAD_HIREG
;
12639 Rd
= inst
.operands
[0].reg
;
12640 Rm
= inst
.operands
[1].reg
;
12642 reject_bad_reg (Rd
);
12643 reject_bad_reg (Rm
);
12645 inst
.instruction
|= Rd
<< 8;
12646 inst
.instruction
|= Rm
;
12654 Rd
= inst
.operands
[0].reg
;
12655 Rs
= (inst
.operands
[1].present
12656 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12657 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12659 reject_bad_reg (Rd
);
12660 reject_bad_reg (Rs
);
12661 if (inst
.operands
[2].isreg
)
12662 reject_bad_reg (inst
.operands
[2].reg
);
12664 inst
.instruction
|= Rd
<< 8;
12665 inst
.instruction
|= Rs
<< 16;
12666 if (!inst
.operands
[2].isreg
)
12668 bfd_boolean narrow
;
12670 if ((inst
.instruction
& 0x00100000) != 0)
12671 narrow
= !in_it_block ();
12673 narrow
= in_it_block ();
12675 if (Rd
> 7 || Rs
> 7)
12678 if (inst
.size_req
== 4 || !unified_syntax
)
12681 if (inst
.reloc
.exp
.X_op
!= O_constant
12682 || inst
.reloc
.exp
.X_add_number
!= 0)
12685 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12686 relaxation, but it doesn't seem worth the hassle. */
12689 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12690 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12691 inst
.instruction
|= Rs
<< 3;
12692 inst
.instruction
|= Rd
;
12696 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12697 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12701 encode_thumb32_shifted_operand (2);
12707 if (warn_on_deprecated
12708 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12709 as_tsktsk (_("setend use is deprecated for ARMv8"));
12711 set_it_insn_type (OUTSIDE_IT_INSN
);
12712 if (inst
.operands
[0].imm
)
12713 inst
.instruction
|= 0x8;
12719 if (!inst
.operands
[1].present
)
12720 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12722 if (unified_syntax
)
12724 bfd_boolean narrow
;
12727 switch (inst
.instruction
)
12730 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12732 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12734 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12736 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12740 if (THUMB_SETS_FLAGS (inst
.instruction
))
12741 narrow
= !in_it_block ();
12743 narrow
= in_it_block ();
12744 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12746 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12748 if (inst
.operands
[2].isreg
12749 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12750 || inst
.operands
[2].reg
> 7))
12752 if (inst
.size_req
== 4)
12755 reject_bad_reg (inst
.operands
[0].reg
);
12756 reject_bad_reg (inst
.operands
[1].reg
);
12760 if (inst
.operands
[2].isreg
)
12762 reject_bad_reg (inst
.operands
[2].reg
);
12763 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12764 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12765 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12766 inst
.instruction
|= inst
.operands
[2].reg
;
12768 /* PR 12854: Error on extraneous shifts. */
12769 constraint (inst
.operands
[2].shifted
,
12770 _("extraneous shift as part of operand to shift insn"));
12774 inst
.operands
[1].shifted
= 1;
12775 inst
.operands
[1].shift_kind
= shift_kind
;
12776 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
12777 ? T_MNEM_movs
: T_MNEM_mov
);
12778 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12779 encode_thumb32_shifted_operand (1);
12780 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12781 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12786 if (inst
.operands
[2].isreg
)
12788 switch (shift_kind
)
12790 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12791 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12792 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12793 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12797 inst
.instruction
|= inst
.operands
[0].reg
;
12798 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12800 /* PR 12854: Error on extraneous shifts. */
12801 constraint (inst
.operands
[2].shifted
,
12802 _("extraneous shift as part of operand to shift insn"));
12806 switch (shift_kind
)
12808 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12809 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12810 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12813 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12814 inst
.instruction
|= inst
.operands
[0].reg
;
12815 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12821 constraint (inst
.operands
[0].reg
> 7
12822 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
12823 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12825 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
12827 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
12828 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12829 _("source1 and dest must be same register"));
12831 switch (inst
.instruction
)
12833 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12834 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12835 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12836 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12840 inst
.instruction
|= inst
.operands
[0].reg
;
12841 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12843 /* PR 12854: Error on extraneous shifts. */
12844 constraint (inst
.operands
[2].shifted
,
12845 _("extraneous shift as part of operand to shift insn"));
12849 switch (inst
.instruction
)
12851 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12852 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12853 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12854 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
12857 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12858 inst
.instruction
|= inst
.operands
[0].reg
;
12859 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12867 unsigned Rd
, Rn
, Rm
;
12869 Rd
= inst
.operands
[0].reg
;
12870 Rn
= inst
.operands
[1].reg
;
12871 Rm
= inst
.operands
[2].reg
;
12873 reject_bad_reg (Rd
);
12874 reject_bad_reg (Rn
);
12875 reject_bad_reg (Rm
);
12877 inst
.instruction
|= Rd
<< 8;
12878 inst
.instruction
|= Rn
<< 16;
12879 inst
.instruction
|= Rm
;
12885 unsigned Rd
, Rn
, Rm
;
12887 Rd
= inst
.operands
[0].reg
;
12888 Rm
= inst
.operands
[1].reg
;
12889 Rn
= inst
.operands
[2].reg
;
12891 reject_bad_reg (Rd
);
12892 reject_bad_reg (Rn
);
12893 reject_bad_reg (Rm
);
12895 inst
.instruction
|= Rd
<< 8;
12896 inst
.instruction
|= Rn
<< 16;
12897 inst
.instruction
|= Rm
;
12903 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12904 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
12905 _("SMC is not permitted on this architecture"));
12906 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12907 _("expression too complex"));
12908 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12909 inst
.instruction
|= (value
& 0xf000) >> 12;
12910 inst
.instruction
|= (value
& 0x0ff0);
12911 inst
.instruction
|= (value
& 0x000f) << 16;
12912 /* PR gas/15623: SMC instructions must be last in an IT block. */
12913 set_it_insn_type_last ();
12919 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12921 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12922 inst
.instruction
|= (value
& 0x0fff);
12923 inst
.instruction
|= (value
& 0xf000) << 4;
12927 do_t_ssat_usat (int bias
)
12931 Rd
= inst
.operands
[0].reg
;
12932 Rn
= inst
.operands
[2].reg
;
12934 reject_bad_reg (Rd
);
12935 reject_bad_reg (Rn
);
12937 inst
.instruction
|= Rd
<< 8;
12938 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
12939 inst
.instruction
|= Rn
<< 16;
12941 if (inst
.operands
[3].present
)
12943 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
12945 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12947 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12948 _("expression too complex"));
12950 if (shift_amount
!= 0)
12952 constraint (shift_amount
> 31,
12953 _("shift expression is too large"));
12955 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
12956 inst
.instruction
|= 0x00200000; /* sh bit. */
12958 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
12959 inst
.instruction
|= (shift_amount
& 0x03) << 6;
12967 do_t_ssat_usat (1);
12975 Rd
= inst
.operands
[0].reg
;
12976 Rn
= inst
.operands
[2].reg
;
12978 reject_bad_reg (Rd
);
12979 reject_bad_reg (Rn
);
12981 inst
.instruction
|= Rd
<< 8;
12982 inst
.instruction
|= inst
.operands
[1].imm
- 1;
12983 inst
.instruction
|= Rn
<< 16;
12989 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
12990 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
12991 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
12992 || inst
.operands
[2].negative
,
12995 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
12997 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12998 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12999 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13000 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13006 if (!inst
.operands
[2].present
)
13007 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13009 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13010 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13011 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13014 inst
.instruction
|= inst
.operands
[0].reg
;
13015 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13016 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13017 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13023 unsigned Rd
, Rn
, Rm
;
13025 Rd
= inst
.operands
[0].reg
;
13026 Rn
= inst
.operands
[1].reg
;
13027 Rm
= inst
.operands
[2].reg
;
13029 reject_bad_reg (Rd
);
13030 reject_bad_reg (Rn
);
13031 reject_bad_reg (Rm
);
13033 inst
.instruction
|= Rd
<< 8;
13034 inst
.instruction
|= Rn
<< 16;
13035 inst
.instruction
|= Rm
;
13036 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13044 Rd
= inst
.operands
[0].reg
;
13045 Rm
= inst
.operands
[1].reg
;
13047 reject_bad_reg (Rd
);
13048 reject_bad_reg (Rm
);
13050 if (inst
.instruction
<= 0xffff
13051 && inst
.size_req
!= 4
13052 && Rd
<= 7 && Rm
<= 7
13053 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13055 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13056 inst
.instruction
|= Rd
;
13057 inst
.instruction
|= Rm
<< 3;
13059 else if (unified_syntax
)
13061 if (inst
.instruction
<= 0xffff)
13062 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13063 inst
.instruction
|= Rd
<< 8;
13064 inst
.instruction
|= Rm
;
13065 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13069 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13070 _("Thumb encoding does not support rotation"));
13071 constraint (1, BAD_HIREG
);
13078 /* We have to do the following check manually as ARM_EXT_OS only applies
13080 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6m
))
13082 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_os
)
13083 /* This only applies to the v6m howver, not later architectures. */
13084 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
))
13085 as_bad (_("SVC is not permitted on this architecture"));
13086 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, arm_ext_os
);
13089 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
13098 half
= (inst
.instruction
& 0x10) != 0;
13099 set_it_insn_type_last ();
13100 constraint (inst
.operands
[0].immisreg
,
13101 _("instruction requires register index"));
13103 Rn
= inst
.operands
[0].reg
;
13104 Rm
= inst
.operands
[0].imm
;
13106 constraint (Rn
== REG_SP
, BAD_SP
);
13107 reject_bad_reg (Rm
);
13109 constraint (!half
&& inst
.operands
[0].shifted
,
13110 _("instruction does not allow shifted index"));
13111 inst
.instruction
|= (Rn
<< 16) | Rm
;
13117 if (!inst
.operands
[0].present
)
13118 inst
.operands
[0].imm
= 0;
13120 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13122 constraint (inst
.size_req
== 2,
13123 _("immediate value out of range"));
13124 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13125 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13126 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13130 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13131 inst
.instruction
|= inst
.operands
[0].imm
;
13134 set_it_insn_type (NEUTRAL_IT_INSN
);
13141 do_t_ssat_usat (0);
13149 Rd
= inst
.operands
[0].reg
;
13150 Rn
= inst
.operands
[2].reg
;
13152 reject_bad_reg (Rd
);
13153 reject_bad_reg (Rn
);
13155 inst
.instruction
|= Rd
<< 8;
13156 inst
.instruction
|= inst
.operands
[1].imm
;
13157 inst
.instruction
|= Rn
<< 16;
13160 /* Neon instruction encoder helpers. */
13162 /* Encodings for the different types for various Neon opcodes. */
13164 /* An "invalid" code for the following tables. */
13167 struct neon_tab_entry
13170 unsigned float_or_poly
;
13171 unsigned scalar_or_imm
;
13174 /* Map overloaded Neon opcodes to their respective encodings. */
13175 #define NEON_ENC_TAB \
13176 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13177 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13178 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13179 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13180 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13181 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13182 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13183 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13184 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13185 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13186 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13187 /* Register variants of the following two instructions are encoded as
13188 vcge / vcgt with the operands reversed. */ \
13189 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13190 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13191 X(vfma, N_INV, 0x0000c10, N_INV), \
13192 X(vfms, N_INV, 0x0200c10, N_INV), \
13193 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13194 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13195 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13196 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13197 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13198 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13199 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13200 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13201 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13202 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13203 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13204 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13205 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13206 X(vshl, 0x0000400, N_INV, 0x0800510), \
13207 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13208 X(vand, 0x0000110, N_INV, 0x0800030), \
13209 X(vbic, 0x0100110, N_INV, 0x0800030), \
13210 X(veor, 0x1000110, N_INV, N_INV), \
13211 X(vorn, 0x0300110, N_INV, 0x0800010), \
13212 X(vorr, 0x0200110, N_INV, 0x0800010), \
13213 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13214 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13215 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13216 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13217 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13218 X(vst1, 0x0000000, 0x0800000, N_INV), \
13219 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13220 X(vst2, 0x0000100, 0x0800100, N_INV), \
13221 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13222 X(vst3, 0x0000200, 0x0800200, N_INV), \
13223 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13224 X(vst4, 0x0000300, 0x0800300, N_INV), \
13225 X(vmovn, 0x1b20200, N_INV, N_INV), \
13226 X(vtrn, 0x1b20080, N_INV, N_INV), \
13227 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13228 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13229 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13230 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13231 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13232 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13233 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13234 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13235 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13236 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13237 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13238 X(vseleq, 0xe000a00, N_INV, N_INV), \
13239 X(vselvs, 0xe100a00, N_INV, N_INV), \
13240 X(vselge, 0xe200a00, N_INV, N_INV), \
13241 X(vselgt, 0xe300a00, N_INV, N_INV), \
13242 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13243 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13244 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13245 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13246 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13247 X(aes, 0x3b00300, N_INV, N_INV), \
13248 X(sha3op, 0x2000c00, N_INV, N_INV), \
13249 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13250 X(sha2op, 0x3ba0380, N_INV, N_INV)
13254 #define X(OPC,I,F,S) N_MNEM_##OPC
13259 static const struct neon_tab_entry neon_enc_tab
[] =
13261 #define X(OPC,I,F,S) { (I), (F), (S) }
13266 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13267 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13268 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13269 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13270 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13271 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13272 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13273 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13274 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13275 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13276 #define NEON_ENC_SINGLE_(X) \
13277 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13278 #define NEON_ENC_DOUBLE_(X) \
13279 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13280 #define NEON_ENC_FPV8_(X) \
13281 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13283 #define NEON_ENCODE(type, inst) \
13286 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13287 inst.is_neon = 1; \
13291 #define check_neon_suffixes \
13294 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13296 as_bad (_("invalid neon suffix for non neon instruction")); \
13302 /* Define shapes for instruction operands. The following mnemonic characters
13303 are used in this table:
13305 F - VFP S<n> register
13306 D - Neon D<n> register
13307 Q - Neon Q<n> register
13311 L - D<n> register list
13313 This table is used to generate various data:
13314 - enumerations of the form NS_DDR to be used as arguments to
13316 - a table classifying shapes into single, double, quad, mixed.
13317 - a table used to drive neon_select_shape. */
13319 #define NEON_SHAPE_DEF \
13320 X(3, (D, D, D), DOUBLE), \
13321 X(3, (Q, Q, Q), QUAD), \
13322 X(3, (D, D, I), DOUBLE), \
13323 X(3, (Q, Q, I), QUAD), \
13324 X(3, (D, D, S), DOUBLE), \
13325 X(3, (Q, Q, S), QUAD), \
13326 X(2, (D, D), DOUBLE), \
13327 X(2, (Q, Q), QUAD), \
13328 X(2, (D, S), DOUBLE), \
13329 X(2, (Q, S), QUAD), \
13330 X(2, (D, R), DOUBLE), \
13331 X(2, (Q, R), QUAD), \
13332 X(2, (D, I), DOUBLE), \
13333 X(2, (Q, I), QUAD), \
13334 X(3, (D, L, D), DOUBLE), \
13335 X(2, (D, Q), MIXED), \
13336 X(2, (Q, D), MIXED), \
13337 X(3, (D, Q, I), MIXED), \
13338 X(3, (Q, D, I), MIXED), \
13339 X(3, (Q, D, D), MIXED), \
13340 X(3, (D, Q, Q), MIXED), \
13341 X(3, (Q, Q, D), MIXED), \
13342 X(3, (Q, D, S), MIXED), \
13343 X(3, (D, Q, S), MIXED), \
13344 X(4, (D, D, D, I), DOUBLE), \
13345 X(4, (Q, Q, Q, I), QUAD), \
13346 X(2, (F, F), SINGLE), \
13347 X(3, (F, F, F), SINGLE), \
13348 X(2, (F, I), SINGLE), \
13349 X(2, (F, D), MIXED), \
13350 X(2, (D, F), MIXED), \
13351 X(3, (F, F, I), MIXED), \
13352 X(4, (R, R, F, F), SINGLE), \
13353 X(4, (F, F, R, R), SINGLE), \
13354 X(3, (D, R, R), DOUBLE), \
13355 X(3, (R, R, D), DOUBLE), \
13356 X(2, (S, R), SINGLE), \
13357 X(2, (R, S), SINGLE), \
13358 X(2, (F, R), SINGLE), \
13359 X(2, (R, F), SINGLE), \
13360 /* Half float shape supported so far. */\
13361 X (2, (H, D), MIXED), \
13362 X (2, (D, H), MIXED), \
13363 X (2, (H, F), MIXED), \
13364 X (2, (F, H), MIXED), \
13365 X (2, (H, H), HALF), \
13366 X (2, (H, R), HALF), \
13367 X (2, (R, H), HALF), \
13368 X (2, (H, I), HALF), \
13369 X (3, (H, H, H), HALF), \
13370 X (3, (H, F, I), MIXED), \
13371 X (3, (F, H, I), MIXED)
13373 #define S2(A,B) NS_##A##B
13374 #define S3(A,B,C) NS_##A##B##C
13375 #define S4(A,B,C,D) NS_##A##B##C##D
13377 #define X(N, L, C) S##N L
13390 enum neon_shape_class
13399 #define X(N, L, C) SC_##C
13401 static enum neon_shape_class neon_shape_class
[] =
13420 /* Register widths of above. */
13421 static unsigned neon_shape_el_size
[] =
13433 struct neon_shape_info
13436 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13439 #define S2(A,B) { SE_##A, SE_##B }
13440 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13441 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13443 #define X(N, L, C) { N, S##N L }
13445 static struct neon_shape_info neon_shape_tab
[] =
13455 /* Bit masks used in type checking given instructions.
13456 'N_EQK' means the type must be the same as (or based on in some way) the key
13457 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13458 set, various other bits can be set as well in order to modify the meaning of
13459 the type constraint. */
13461 enum neon_type_mask
13485 N_KEY
= 0x1000000, /* Key element (main type specifier). */
13486 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
13487 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
13488 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
13489 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
13490 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
13491 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13492 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13493 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13494 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
13495 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13497 N_MAX_NONSPECIAL
= N_P64
13500 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13502 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13503 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13504 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13505 #define N_S_32 (N_S8 | N_S16 | N_S32)
13506 #define N_F_16_32 (N_F16 | N_F32)
13507 #define N_SUF_32 (N_SU_32 | N_F_16_32)
13508 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13509 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
13510 #define N_F_ALL (N_F16 | N_F32 | N_F64)
13512 /* Pass this as the first type argument to neon_check_type to ignore types
13514 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13516 /* Select a "shape" for the current instruction (describing register types or
13517 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13518 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13519 function of operand parsing, so this function doesn't need to be called.
13520 Shapes should be listed in order of decreasing length. */
13522 static enum neon_shape
13523 neon_select_shape (enum neon_shape shape
, ...)
13526 enum neon_shape first_shape
= shape
;
13528 /* Fix missing optional operands. FIXME: we don't know at this point how
13529 many arguments we should have, so this makes the assumption that we have
13530 > 1. This is true of all current Neon opcodes, I think, but may not be
13531 true in the future. */
13532 if (!inst
.operands
[1].present
)
13533 inst
.operands
[1] = inst
.operands
[0];
13535 va_start (ap
, shape
);
13537 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13542 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13544 if (!inst
.operands
[j
].present
)
13550 switch (neon_shape_tab
[shape
].el
[j
])
13552 /* If a .f16, .16, .u16, .s16 type specifier is given over
13553 a VFP single precision register operand, it's essentially
13554 means only half of the register is used.
13556 If the type specifier is given after the mnemonics, the
13557 information is stored in inst.vectype. If the type specifier
13558 is given after register operand, the information is stored
13559 in inst.operands[].vectype.
13561 When there is only one type specifier, and all the register
13562 operands are the same type of hardware register, the type
13563 specifier applies to all register operands.
13565 If no type specifier is given, the shape is inferred from
13566 operand information.
13569 vadd.f16 s0, s1, s2: NS_HHH
13570 vabs.f16 s0, s1: NS_HH
13571 vmov.f16 s0, r1: NS_HR
13572 vmov.f16 r0, s1: NS_RH
13573 vcvt.f16 r0, s1: NS_RH
13574 vcvt.f16.s32 s2, s2, #29: NS_HFI
13575 vcvt.f16.s32 s2, s2: NS_HF
13578 if (!(inst
.operands
[j
].isreg
13579 && inst
.operands
[j
].isvec
13580 && inst
.operands
[j
].issingle
13581 && !inst
.operands
[j
].isquad
13582 && ((inst
.vectype
.elems
== 1
13583 && inst
.vectype
.el
[0].size
== 16)
13584 || (inst
.vectype
.elems
> 1
13585 && inst
.vectype
.el
[j
].size
== 16)
13586 || (inst
.vectype
.elems
== 0
13587 && inst
.operands
[j
].vectype
.type
!= NT_invtype
13588 && inst
.operands
[j
].vectype
.size
== 16))))
13593 if (!(inst
.operands
[j
].isreg
13594 && inst
.operands
[j
].isvec
13595 && inst
.operands
[j
].issingle
13596 && !inst
.operands
[j
].isquad
13597 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
13598 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
13599 || (inst
.vectype
.elems
== 0
13600 && (inst
.operands
[j
].vectype
.size
== 32
13601 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
13606 if (!(inst
.operands
[j
].isreg
13607 && inst
.operands
[j
].isvec
13608 && !inst
.operands
[j
].isquad
13609 && !inst
.operands
[j
].issingle
))
13614 if (!(inst
.operands
[j
].isreg
13615 && !inst
.operands
[j
].isvec
))
13620 if (!(inst
.operands
[j
].isreg
13621 && inst
.operands
[j
].isvec
13622 && inst
.operands
[j
].isquad
13623 && !inst
.operands
[j
].issingle
))
13628 if (!(!inst
.operands
[j
].isreg
13629 && !inst
.operands
[j
].isscalar
))
13634 if (!(!inst
.operands
[j
].isreg
13635 && inst
.operands
[j
].isscalar
))
13645 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
13646 /* We've matched all the entries in the shape table, and we don't
13647 have any left over operands which have not been matched. */
13653 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
13654 first_error (_("invalid instruction shape"));
13659 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13660 means the Q bit should be set). */
13663 neon_quad (enum neon_shape shape
)
13665 return neon_shape_class
[shape
] == SC_QUAD
;
13669 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
13672 /* Allow modification to be made to types which are constrained to be
13673 based on the key element, based on bits set alongside N_EQK. */
13674 if ((typebits
& N_EQK
) != 0)
13676 if ((typebits
& N_HLF
) != 0)
13678 else if ((typebits
& N_DBL
) != 0)
13680 if ((typebits
& N_SGN
) != 0)
13681 *g_type
= NT_signed
;
13682 else if ((typebits
& N_UNS
) != 0)
13683 *g_type
= NT_unsigned
;
13684 else if ((typebits
& N_INT
) != 0)
13685 *g_type
= NT_integer
;
13686 else if ((typebits
& N_FLT
) != 0)
13687 *g_type
= NT_float
;
13688 else if ((typebits
& N_SIZ
) != 0)
13689 *g_type
= NT_untyped
;
13693 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13694 operand type, i.e. the single type specified in a Neon instruction when it
13695 is the only one given. */
13697 static struct neon_type_el
13698 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
13700 struct neon_type_el dest
= *key
;
13702 gas_assert ((thisarg
& N_EQK
) != 0);
13704 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
13709 /* Convert Neon type and size into compact bitmask representation. */
13711 static enum neon_type_mask
13712 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
13719 case 8: return N_8
;
13720 case 16: return N_16
;
13721 case 32: return N_32
;
13722 case 64: return N_64
;
13730 case 8: return N_I8
;
13731 case 16: return N_I16
;
13732 case 32: return N_I32
;
13733 case 64: return N_I64
;
13741 case 16: return N_F16
;
13742 case 32: return N_F32
;
13743 case 64: return N_F64
;
13751 case 8: return N_P8
;
13752 case 16: return N_P16
;
13753 case 64: return N_P64
;
13761 case 8: return N_S8
;
13762 case 16: return N_S16
;
13763 case 32: return N_S32
;
13764 case 64: return N_S64
;
13772 case 8: return N_U8
;
13773 case 16: return N_U16
;
13774 case 32: return N_U32
;
13775 case 64: return N_U64
;
13786 /* Convert compact Neon bitmask type representation to a type and size. Only
13787 handles the case where a single bit is set in the mask. */
13790 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
13791 enum neon_type_mask mask
)
13793 if ((mask
& N_EQK
) != 0)
13796 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
13798 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
13800 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
13802 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
13807 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
13809 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
13810 *type
= NT_unsigned
;
13811 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
13812 *type
= NT_integer
;
13813 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
13814 *type
= NT_untyped
;
13815 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
13817 else if ((mask
& (N_F_ALL
)) != 0)
13825 /* Modify a bitmask of allowed types. This is only needed for type
13829 modify_types_allowed (unsigned allowed
, unsigned mods
)
13832 enum neon_el_type type
;
13838 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
13840 if (el_type_of_type_chk (&type
, &size
,
13841 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
13843 neon_modify_type_size (mods
, &type
, &size
);
13844 destmask
|= type_chk_of_el_type (type
, size
);
13851 /* Check type and return type classification.
13852 The manual states (paraphrase): If one datatype is given, it indicates the
13854 - the second operand, if there is one
13855 - the operand, if there is no second operand
13856 - the result, if there are no operands.
13857 This isn't quite good enough though, so we use a concept of a "key" datatype
13858 which is set on a per-instruction basis, which is the one which matters when
13859 only one data type is written.
13860 Note: this function has side-effects (e.g. filling in missing operands). All
13861 Neon instructions should call it before performing bit encoding. */
13863 static struct neon_type_el
13864 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
13867 unsigned i
, pass
, key_el
= 0;
13868 unsigned types
[NEON_MAX_TYPE_ELS
];
13869 enum neon_el_type k_type
= NT_invtype
;
13870 unsigned k_size
= -1u;
13871 struct neon_type_el badtype
= {NT_invtype
, -1};
13872 unsigned key_allowed
= 0;
13874 /* Optional registers in Neon instructions are always (not) in operand 1.
13875 Fill in the missing operand here, if it was omitted. */
13876 if (els
> 1 && !inst
.operands
[1].present
)
13877 inst
.operands
[1] = inst
.operands
[0];
13879 /* Suck up all the varargs. */
13881 for (i
= 0; i
< els
; i
++)
13883 unsigned thisarg
= va_arg (ap
, unsigned);
13884 if (thisarg
== N_IGNORE_TYPE
)
13889 types
[i
] = thisarg
;
13890 if ((thisarg
& N_KEY
) != 0)
13895 if (inst
.vectype
.elems
> 0)
13896 for (i
= 0; i
< els
; i
++)
13897 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
13899 first_error (_("types specified in both the mnemonic and operands"));
13903 /* Duplicate inst.vectype elements here as necessary.
13904 FIXME: No idea if this is exactly the same as the ARM assembler,
13905 particularly when an insn takes one register and one non-register
13907 if (inst
.vectype
.elems
== 1 && els
> 1)
13910 inst
.vectype
.elems
= els
;
13911 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
13912 for (j
= 0; j
< els
; j
++)
13914 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13917 else if (inst
.vectype
.elems
== 0 && els
> 0)
13920 /* No types were given after the mnemonic, so look for types specified
13921 after each operand. We allow some flexibility here; as long as the
13922 "key" operand has a type, we can infer the others. */
13923 for (j
= 0; j
< els
; j
++)
13924 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
13925 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
13927 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
13929 for (j
= 0; j
< els
; j
++)
13930 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
13931 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13936 first_error (_("operand types can't be inferred"));
13940 else if (inst
.vectype
.elems
!= els
)
13942 first_error (_("type specifier has the wrong number of parts"));
13946 for (pass
= 0; pass
< 2; pass
++)
13948 for (i
= 0; i
< els
; i
++)
13950 unsigned thisarg
= types
[i
];
13951 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
13952 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
13953 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
13954 unsigned g_size
= inst
.vectype
.el
[i
].size
;
13956 /* Decay more-specific signed & unsigned types to sign-insensitive
13957 integer types if sign-specific variants are unavailable. */
13958 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
13959 && (types_allowed
& N_SU_ALL
) == 0)
13960 g_type
= NT_integer
;
13962 /* If only untyped args are allowed, decay any more specific types to
13963 them. Some instructions only care about signs for some element
13964 sizes, so handle that properly. */
13965 if (((types_allowed
& N_UNT
) == 0)
13966 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
13967 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
13968 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
13969 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
13970 g_type
= NT_untyped
;
13974 if ((thisarg
& N_KEY
) != 0)
13978 key_allowed
= thisarg
& ~N_KEY
;
13980 /* Check architecture constraint on FP16 extension. */
13982 && k_type
== NT_float
13983 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
13985 inst
.error
= _(BAD_FP16
);
13992 if ((thisarg
& N_VFP
) != 0)
13994 enum neon_shape_el regshape
;
13995 unsigned regwidth
, match
;
13997 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14000 first_error (_("invalid instruction shape"));
14003 regshape
= neon_shape_tab
[ns
].el
[i
];
14004 regwidth
= neon_shape_el_size
[regshape
];
14006 /* In VFP mode, operands must match register widths. If we
14007 have a key operand, use its width, else use the width of
14008 the current operand. */
14014 /* FP16 will use a single precision register. */
14015 if (regwidth
== 32 && match
== 16)
14017 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14021 inst
.error
= _(BAD_FP16
);
14026 if (regwidth
!= match
)
14028 first_error (_("operand size must match register width"));
14033 if ((thisarg
& N_EQK
) == 0)
14035 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14037 if ((given_type
& types_allowed
) == 0)
14039 first_error (_("bad type in Neon instruction"));
14045 enum neon_el_type mod_k_type
= k_type
;
14046 unsigned mod_k_size
= k_size
;
14047 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14048 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14050 first_error (_("inconsistent types in Neon instruction"));
14058 return inst
.vectype
.el
[key_el
];
14061 /* Neon-style VFP instruction forwarding. */
14063 /* Thumb VFP instructions have 0xE in the condition field. */
14066 do_vfp_cond_or_thumb (void)
14071 inst
.instruction
|= 0xe0000000;
14073 inst
.instruction
|= inst
.cond
<< 28;
14076 /* Look up and encode a simple mnemonic, for use as a helper function for the
14077 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14078 etc. It is assumed that operand parsing has already been done, and that the
14079 operands are in the form expected by the given opcode (this isn't necessarily
14080 the same as the form in which they were parsed, hence some massaging must
14081 take place before this function is called).
14082 Checks current arch version against that in the looked-up opcode. */
14085 do_vfp_nsyn_opcode (const char *opname
)
14087 const struct asm_opcode
*opcode
;
14089 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14094 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14095 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14102 inst
.instruction
= opcode
->tvalue
;
14103 opcode
->tencode ();
14107 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14108 opcode
->aencode ();
14113 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14115 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14117 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14120 do_vfp_nsyn_opcode ("fadds");
14122 do_vfp_nsyn_opcode ("fsubs");
14124 /* ARMv8.2 fp16 instruction. */
14126 do_scalar_fp16_v82_encode ();
14131 do_vfp_nsyn_opcode ("faddd");
14133 do_vfp_nsyn_opcode ("fsubd");
14137 /* Check operand types to see if this is a VFP instruction, and if so call
14141 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14143 enum neon_shape rs
;
14144 struct neon_type_el et
;
14149 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14150 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14154 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14155 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14156 N_F_ALL
| N_KEY
| N_VFP
);
14163 if (et
.type
!= NT_invtype
)
14174 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14176 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14178 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14181 do_vfp_nsyn_opcode ("fmacs");
14183 do_vfp_nsyn_opcode ("fnmacs");
14185 /* ARMv8.2 fp16 instruction. */
14187 do_scalar_fp16_v82_encode ();
14192 do_vfp_nsyn_opcode ("fmacd");
14194 do_vfp_nsyn_opcode ("fnmacd");
14199 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14201 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14203 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14206 do_vfp_nsyn_opcode ("ffmas");
14208 do_vfp_nsyn_opcode ("ffnmas");
14210 /* ARMv8.2 fp16 instruction. */
14212 do_scalar_fp16_v82_encode ();
14217 do_vfp_nsyn_opcode ("ffmad");
14219 do_vfp_nsyn_opcode ("ffnmad");
14224 do_vfp_nsyn_mul (enum neon_shape rs
)
14226 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14228 do_vfp_nsyn_opcode ("fmuls");
14230 /* ARMv8.2 fp16 instruction. */
14232 do_scalar_fp16_v82_encode ();
14235 do_vfp_nsyn_opcode ("fmuld");
14239 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14241 int is_neg
= (inst
.instruction
& 0x80) != 0;
14242 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14244 if (rs
== NS_FF
|| rs
== NS_HH
)
14247 do_vfp_nsyn_opcode ("fnegs");
14249 do_vfp_nsyn_opcode ("fabss");
14251 /* ARMv8.2 fp16 instruction. */
14253 do_scalar_fp16_v82_encode ();
14258 do_vfp_nsyn_opcode ("fnegd");
14260 do_vfp_nsyn_opcode ("fabsd");
14264 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14265 insns belong to Neon, and are handled elsewhere. */
14268 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14270 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14274 do_vfp_nsyn_opcode ("fldmdbs");
14276 do_vfp_nsyn_opcode ("fldmias");
14281 do_vfp_nsyn_opcode ("fstmdbs");
14283 do_vfp_nsyn_opcode ("fstmias");
14288 do_vfp_nsyn_sqrt (void)
14290 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14291 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14293 if (rs
== NS_FF
|| rs
== NS_HH
)
14295 do_vfp_nsyn_opcode ("fsqrts");
14297 /* ARMv8.2 fp16 instruction. */
14299 do_scalar_fp16_v82_encode ();
14302 do_vfp_nsyn_opcode ("fsqrtd");
14306 do_vfp_nsyn_div (void)
14308 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14309 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14310 N_F_ALL
| N_KEY
| N_VFP
);
14312 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14314 do_vfp_nsyn_opcode ("fdivs");
14316 /* ARMv8.2 fp16 instruction. */
14318 do_scalar_fp16_v82_encode ();
14321 do_vfp_nsyn_opcode ("fdivd");
14325 do_vfp_nsyn_nmul (void)
14327 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14328 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14329 N_F_ALL
| N_KEY
| N_VFP
);
14331 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14333 NEON_ENCODE (SINGLE
, inst
);
14334 do_vfp_sp_dyadic ();
14336 /* ARMv8.2 fp16 instruction. */
14338 do_scalar_fp16_v82_encode ();
14342 NEON_ENCODE (DOUBLE
, inst
);
14343 do_vfp_dp_rd_rn_rm ();
14345 do_vfp_cond_or_thumb ();
14350 do_vfp_nsyn_cmp (void)
14352 enum neon_shape rs
;
14353 if (inst
.operands
[1].isreg
)
14355 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14356 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14358 if (rs
== NS_FF
|| rs
== NS_HH
)
14360 NEON_ENCODE (SINGLE
, inst
);
14361 do_vfp_sp_monadic ();
14365 NEON_ENCODE (DOUBLE
, inst
);
14366 do_vfp_dp_rd_rm ();
14371 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
14372 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
14374 switch (inst
.instruction
& 0x0fffffff)
14377 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14380 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14386 if (rs
== NS_FI
|| rs
== NS_HI
)
14388 NEON_ENCODE (SINGLE
, inst
);
14389 do_vfp_sp_compare_z ();
14393 NEON_ENCODE (DOUBLE
, inst
);
14397 do_vfp_cond_or_thumb ();
14399 /* ARMv8.2 fp16 instruction. */
14400 if (rs
== NS_HI
|| rs
== NS_HH
)
14401 do_scalar_fp16_v82_encode ();
14405 nsyn_insert_sp (void)
14407 inst
.operands
[1] = inst
.operands
[0];
14408 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14409 inst
.operands
[0].reg
= REG_SP
;
14410 inst
.operands
[0].isreg
= 1;
14411 inst
.operands
[0].writeback
= 1;
14412 inst
.operands
[0].present
= 1;
14416 do_vfp_nsyn_push (void)
14420 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14421 _("register list must contain at least 1 and at most 16 "
14424 if (inst
.operands
[1].issingle
)
14425 do_vfp_nsyn_opcode ("fstmdbs");
14427 do_vfp_nsyn_opcode ("fstmdbd");
14431 do_vfp_nsyn_pop (void)
14435 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14436 _("register list must contain at least 1 and at most 16 "
14439 if (inst
.operands
[1].issingle
)
14440 do_vfp_nsyn_opcode ("fldmias");
14442 do_vfp_nsyn_opcode ("fldmiad");
14445 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14446 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14449 neon_dp_fixup (struct arm_it
* insn
)
14451 unsigned int i
= insn
->instruction
;
14456 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14467 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  ffs returns the 1-based index of the lowest set bit.  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
/* Split a register number into its low 4 bits and its high (fifth) bit, as
   used by the D/N/M register fields of Neon encodings.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14482 /* Encode insns with bit pattern:
14484 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14485 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14487 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14488 different meaning for some instruction. */
14491 neon_three_same (int isquad
, int ubit
, int size
)
14493 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14494 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14495 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14496 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14497 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14498 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14499 inst
.instruction
|= (isquad
!= 0) << 6;
14500 inst
.instruction
|= (ubit
!= 0) << 24;
14502 inst
.instruction
|= neon_logbits (size
) << 20;
14504 neon_dp_fixup (&inst
);
14507 /* Encode instructions of the form:
14509 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14510 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14512 Don't write size if SIZE == -1. */
14515 neon_two_same (int qbit
, int ubit
, int size
)
14517 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14518 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14519 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14520 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14521 inst
.instruction
|= (qbit
!= 0) << 6;
14522 inst
.instruction
|= (ubit
!= 0) << 24;
14525 inst
.instruction
|= neon_logbits (size
) << 18;
14527 neon_dp_fixup (&inst
);
14530 /* Neon instruction encoders, in approximate order of appearance. */
14533 do_neon_dyadic_i_su (void)
14535 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14536 struct neon_type_el et
= neon_check_type (3, rs
,
14537 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14538 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14542 do_neon_dyadic_i64_su (void)
14544 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14545 struct neon_type_el et
= neon_check_type (3, rs
,
14546 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14547 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14551 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14554 unsigned size
= et
.size
>> 3;
14555 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14556 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14557 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14558 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14559 inst
.instruction
|= (isquad
!= 0) << 6;
14560 inst
.instruction
|= immbits
<< 16;
14561 inst
.instruction
|= (size
>> 3) << 7;
14562 inst
.instruction
|= (size
& 0x7) << 19;
14564 inst
.instruction
|= (uval
!= 0) << 24;
14566 neon_dp_fixup (&inst
);
14570 do_neon_shl_imm (void)
14572 if (!inst
.operands
[2].isreg
)
14574 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14575 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14576 int imm
= inst
.operands
[2].imm
;
14578 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14579 _("immediate out of range for shift"));
14580 NEON_ENCODE (IMMED
, inst
);
14581 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14585 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14586 struct neon_type_el et
= neon_check_type (3, rs
,
14587 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14590 /* VSHL/VQSHL 3-register variants have syntax such as:
14592 whereas other 3-register operations encoded by neon_three_same have
14595 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14597 tmp
= inst
.operands
[2].reg
;
14598 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14599 inst
.operands
[1].reg
= tmp
;
14600 NEON_ENCODE (INTEGER
, inst
);
14601 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14606 do_neon_qshl_imm (void)
14608 if (!inst
.operands
[2].isreg
)
14610 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14611 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14612 int imm
= inst
.operands
[2].imm
;
14614 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14615 _("immediate out of range for shift"));
14616 NEON_ENCODE (IMMED
, inst
);
14617 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
14621 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14622 struct neon_type_el et
= neon_check_type (3, rs
,
14623 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14626 /* See note in do_neon_shl_imm. */
14627 tmp
= inst
.operands
[2].reg
;
14628 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14629 inst
.operands
[1].reg
= tmp
;
14630 NEON_ENCODE (INTEGER
, inst
);
14631 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14636 do_neon_rshl (void)
14638 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14639 struct neon_type_el et
= neon_check_type (3, rs
,
14640 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14643 tmp
= inst
.operands
[2].reg
;
14644 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14645 inst
.operands
[1].reg
= tmp
;
14646 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14650 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
14652 /* Handle .I8 pseudo-instructions. */
14655 /* Unfortunately, this will make everything apart from zero out-of-range.
14656 FIXME is this the intended semantics? There doesn't seem much point in
14657 accepting .I8 if so. */
14658 immediate
|= immediate
<< 8;
14664 if (immediate
== (immediate
& 0x000000ff))
14666 *immbits
= immediate
;
14669 else if (immediate
== (immediate
& 0x0000ff00))
14671 *immbits
= immediate
>> 8;
14674 else if (immediate
== (immediate
& 0x00ff0000))
14676 *immbits
= immediate
>> 16;
14679 else if (immediate
== (immediate
& 0xff000000))
14681 *immbits
= immediate
>> 24;
14684 if ((immediate
& 0xffff) != (immediate
>> 16))
14685 goto bad_immediate
;
14686 immediate
&= 0xffff;
14689 if (immediate
== (immediate
& 0x000000ff))
14691 *immbits
= immediate
;
14694 else if (immediate
== (immediate
& 0x0000ff00))
14696 *immbits
= immediate
>> 8;
14701 first_error (_("immediate value out of range"));
14706 do_neon_logic (void)
14708 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
14710 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14711 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14712 /* U bit and size field were set as part of the bitmask. */
14713 NEON_ENCODE (INTEGER
, inst
);
14714 neon_three_same (neon_quad (rs
), 0, -1);
14718 const int three_ops_form
= (inst
.operands
[2].present
14719 && !inst
.operands
[2].isreg
);
14720 const int immoperand
= (three_ops_form
? 2 : 1);
14721 enum neon_shape rs
= (three_ops_form
14722 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
14723 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
14724 struct neon_type_el et
= neon_check_type (2, rs
,
14725 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14726 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
14730 if (et
.type
== NT_invtype
)
14733 if (three_ops_form
)
14734 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14735 _("first and second operands shall be the same register"));
14737 NEON_ENCODE (IMMED
, inst
);
14739 immbits
= inst
.operands
[immoperand
].imm
;
14742 /* .i64 is a pseudo-op, so the immediate must be a repeating
14744 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
14745 inst
.operands
[immoperand
].reg
: 0))
14747 /* Set immbits to an invalid constant. */
14748 immbits
= 0xdeadbeef;
14755 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14759 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14763 /* Pseudo-instruction for VBIC. */
14764 neon_invert_size (&immbits
, 0, et
.size
);
14765 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14769 /* Pseudo-instruction for VORR. */
14770 neon_invert_size (&immbits
, 0, et
.size
);
14771 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14781 inst
.instruction
|= neon_quad (rs
) << 6;
14782 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14783 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14784 inst
.instruction
|= cmode
<< 8;
14785 neon_write_immbits (immbits
);
14787 neon_dp_fixup (&inst
);
14792 do_neon_bitfield (void)
14794 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14795 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14796 neon_three_same (neon_quad (rs
), 0, -1);
14800 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
14803 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14804 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
14806 if (et
.type
== NT_float
)
14808 NEON_ENCODE (FLOAT
, inst
);
14809 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
14813 NEON_ENCODE (INTEGER
, inst
);
14814 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
14819 do_neon_dyadic_if_su (void)
14821 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14825 do_neon_dyadic_if_su_d (void)
14827 /* This version only allow D registers, but that constraint is enforced during
14828 operand parsing so we don't need to do anything extra here. */
14829 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14833 do_neon_dyadic_if_i_d (void)
14835 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14836 affected if we specify unsigned args. */
14837 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Bit flags selecting which checks vfp_or_neon_is_neon performs.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
14848 Neon instruction sets, but turned out to be a Neon instruction (due to the
14849 operand types involved, etc.). We have to check and/or fix-up a couple of
14852 - Make sure the user hasn't attempted to make a Neon instruction
14854 - Alter the value in the condition code field if necessary.
14855 - Make sure that the arch supports Neon instructions.
14857 Which of these operations take place depends on bits from enum
14858 vfp_or_neon_is_neon_bits.
14860 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14861 current instruction's condition is COND_ALWAYS, the condition field is
14862 changed to inst.uncond_value. This is necessary because instructions shared
14863 between VFP and Neon may be conditional for the VFP variants only, and the
14864 unconditional Neon version must have, e.g., 0xF in the condition field. */
14867 vfp_or_neon_is_neon (unsigned check
)
14869 /* Conditions are always legal in Thumb mode (IT blocks). */
14870 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
14872 if (inst
.cond
!= COND_ALWAYS
)
14874 first_error (_(BAD_COND
));
14877 if (inst
.uncond_value
!= -1)
14878 inst
.instruction
|= inst
.uncond_value
<< 28;
14881 if ((check
& NEON_CHECK_ARCH
)
14882 && !mark_feature_used (&fpu_neon_ext_v1
))
14884 first_error (_(BAD_FPU
));
14888 if ((check
& NEON_CHECK_ARCH8
)
14889 && !mark_feature_used (&fpu_neon_ext_armv8
))
14891 first_error (_(BAD_FPU
));
14899 do_neon_addsub_if_i (void)
14901 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
14904 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14907 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14908 affected if we specify unsigned args. */
14909 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
14912 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14914 V<op> A,B (A is operand 0, B is operand 2)
14919 so handle that case specially. */
14922 neon_exchange_operands (void)
14924 if (inst
.operands
[1].present
)
14926 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
14928 /* Swap operands[1] and operands[2]. */
14929 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
14930 inst
.operands
[1] = inst
.operands
[2];
14931 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
14936 inst
.operands
[1] = inst
.operands
[2];
14937 inst
.operands
[2] = inst
.operands
[0];
14942 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
14944 if (inst
.operands
[2].isreg
)
14947 neon_exchange_operands ();
14948 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
14952 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14953 struct neon_type_el et
= neon_check_type (2, rs
,
14954 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
14956 NEON_ENCODE (IMMED
, inst
);
14957 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14958 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14959 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14960 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14961 inst
.instruction
|= neon_quad (rs
) << 6;
14962 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14963 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14965 neon_dp_fixup (&inst
);
14972 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
14976 do_neon_cmp_inv (void)
14978 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
14984 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and
   the index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
15019 /* Encode multiply / multiply-accumulate scalar instructions. */
15022 neon_mul_mac (struct neon_type_el et
, int ubit
)
15026 /* Give a more helpful error message if we have an invalid type. */
15027 if (et
.type
== NT_invtype
)
15030 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
15031 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15032 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15033 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15034 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15035 inst
.instruction
|= LOW4 (scalar
);
15036 inst
.instruction
|= HI1 (scalar
) << 5;
15037 inst
.instruction
|= (et
.type
== NT_float
) << 8;
15038 inst
.instruction
|= neon_logbits (et
.size
) << 20;
15039 inst
.instruction
|= (ubit
!= 0) << 24;
15041 neon_dp_fixup (&inst
);
15045 do_neon_mac_maybe_scalar (void)
15047 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
15050 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15053 if (inst
.operands
[2].isscalar
)
15055 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15056 struct neon_type_el et
= neon_check_type (3, rs
,
15057 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
15058 NEON_ENCODE (SCALAR
, inst
);
15059 neon_mul_mac (et
, neon_quad (rs
));
15063 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15064 affected if we specify unsigned args. */
15065 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15070 do_neon_fmac (void)
15072 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
15075 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15078 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15084 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15085 struct neon_type_el et
= neon_check_type (3, rs
,
15086 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15087 neon_three_same (neon_quad (rs
), 0, et
.size
);
15090 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15091 same types as the MAC equivalents. The polynomial type for this instruction
15092 is encoded the same as the integer type. */
15097 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
15100 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15103 if (inst
.operands
[2].isscalar
)
15104 do_neon_mac_maybe_scalar ();
15106 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
15110 do_neon_qdmulh (void)
15112 if (inst
.operands
[2].isscalar
)
15114 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15115 struct neon_type_el et
= neon_check_type (3, rs
,
15116 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15117 NEON_ENCODE (SCALAR
, inst
);
15118 neon_mul_mac (et
, neon_quad (rs
));
15122 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15123 struct neon_type_el et
= neon_check_type (3, rs
,
15124 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15125 NEON_ENCODE (INTEGER
, inst
);
15126 /* The U bit (rounding) comes from bit mask. */
15127 neon_three_same (neon_quad (rs
), 0, et
.size
);
15132 do_neon_qrdmlah (void)
15134 /* Check we're on the correct architecture. */
15135 if (!mark_feature_used (&fpu_neon_ext_armv8
))
15137 _("instruction form not available on this architecture.");
15138 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
15140 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15141 record_feature_use (&fpu_neon_ext_v8_1
);
15144 if (inst
.operands
[2].isscalar
)
15146 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15147 struct neon_type_el et
= neon_check_type (3, rs
,
15148 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15149 NEON_ENCODE (SCALAR
, inst
);
15150 neon_mul_mac (et
, neon_quad (rs
));
15154 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15155 struct neon_type_el et
= neon_check_type (3, rs
,
15156 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15157 NEON_ENCODE (INTEGER
, inst
);
15158 /* The U bit (rounding) comes from bit mask. */
15159 neon_three_same (neon_quad (rs
), 0, et
.size
);
15164 do_neon_fcmp_absolute (void)
15166 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15167 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15168 N_F_16_32
| N_KEY
);
15169 /* Size field comes from bit mask. */
15170 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
/* Inverted absolute float comparison: swap sources, then encode.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15181 do_neon_step (void)
15183 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15184 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15185 N_F_16_32
| N_KEY
);
15186 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15190 do_neon_abs_neg (void)
15192 enum neon_shape rs
;
15193 struct neon_type_el et
;
15195 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
15198 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15201 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15202 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
15204 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15205 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15206 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15207 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15208 inst
.instruction
|= neon_quad (rs
) << 6;
15209 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15210 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15212 neon_dp_fixup (&inst
);
15218 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15219 struct neon_type_el et
= neon_check_type (2, rs
,
15220 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15221 int imm
= inst
.operands
[2].imm
;
15222 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15223 _("immediate out of range for insert"));
15224 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15230 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15231 struct neon_type_el et
= neon_check_type (2, rs
,
15232 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15233 int imm
= inst
.operands
[2].imm
;
15234 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15235 _("immediate out of range for insert"));
15236 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
15240 do_neon_qshlu_imm (void)
15242 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15243 struct neon_type_el et
= neon_check_type (2, rs
,
15244 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
15245 int imm
= inst
.operands
[2].imm
;
15246 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15247 _("immediate out of range for shift"));
15248 /* Only encodes the 'U present' variant of the instruction.
15249 In this case, signed types have OP (bit 8) set to 0.
15250 Unsigned types have OP set to 1. */
15251 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
15252 /* The rest of the bits are the same as other immediate shifts. */
15253 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15257 do_neon_qmovn (void)
15259 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15260 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15261 /* Saturating move where operands can be signed or unsigned, and the
15262 destination has the same signedness. */
15263 NEON_ENCODE (INTEGER
, inst
);
15264 if (et
.type
== NT_unsigned
)
15265 inst
.instruction
|= 0xc0;
15267 inst
.instruction
|= 0x80;
15268 neon_two_same (0, 1, et
.size
/ 2);
15272 do_neon_qmovun (void)
15274 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15275 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15276 /* Saturating move with unsigned results. Operands must be signed. */
15277 NEON_ENCODE (INTEGER
, inst
);
15278 neon_two_same (0, 1, et
.size
/ 2);
15282 do_neon_rshift_sat_narrow (void)
15284 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15285 or unsigned. If operands are unsigned, results must also be unsigned. */
15286 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15287 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15288 int imm
= inst
.operands
[2].imm
;
15289 /* This gets the bounds check, size encoding and immediate bits calculation
15293 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15294 VQMOVN.I<size> <Dd>, <Qm>. */
15297 inst
.operands
[2].present
= 0;
15298 inst
.instruction
= N_MNEM_vqmovn
;
15303 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15304 _("immediate out of range"));
15305 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15309 do_neon_rshift_sat_narrow_u (void)
15311 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15312 or unsigned. If operands are unsigned, results must also be unsigned. */
15313 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15314 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15315 int imm
= inst
.operands
[2].imm
;
15316 /* This gets the bounds check, size encoding and immediate bits calculation
15320 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15321 VQMOVUN.I<size> <Dd>, <Qm>. */
15324 inst
.operands
[2].present
= 0;
15325 inst
.instruction
= N_MNEM_vqmovun
;
15330 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15331 _("immediate out of range"));
15332 /* FIXME: The manual is kind of unclear about what value U should have in
15333 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15335 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15339 do_neon_movn (void)
15341 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15342 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15343 NEON_ENCODE (INTEGER
, inst
);
15344 neon_two_same (0, 1, et
.size
/ 2);
15348 do_neon_rshift_narrow (void)
15350 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15351 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15352 int imm
= inst
.operands
[2].imm
;
15353 /* This gets the bounds check, size encoding and immediate bits calculation
15357 /* If immediate is zero then we are a pseudo-instruction for
15358 VMOVN.I<size> <Dd>, <Qm> */
15361 inst
.operands
[2].present
= 0;
15362 inst
.instruction
= N_MNEM_vmovn
;
15367 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15368 _("immediate out of range for narrowing operation"));
15369 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15373 do_neon_shll (void)
15375 /* FIXME: Type checking when lengthening. */
15376 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15377 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15378 unsigned imm
= inst
.operands
[2].imm
;
15380 if (imm
== et
.size
)
15382 /* Maximum shift variant. */
15383 NEON_ENCODE (INTEGER
, inst
);
15384 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15385 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15386 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15387 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15388 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15390 neon_dp_fixup (&inst
);
15394 /* A more-specific type check for non-max versions. */
15395 et
= neon_check_type (2, NS_QDI
,
15396 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15397 NEON_ENCODE (IMMED
, inst
);
15398 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

#define CVT_FLAVOUR_VAR							\
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL)	\
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL)	\
  /* Half-precision conversions.  */					\
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	\
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	\
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	\
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	\
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL)		\
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL)		\
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	\
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */	\
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						\
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL)		\
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL)		\
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */				\
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL)	\
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL)	\
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL)	\
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL)	\
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL)	\
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL)	\
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL)	\
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15454 static enum neon_cvt_flavour
15455 get_neon_cvt_flavour (enum neon_shape rs
)
15457 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
15458 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
15459 if (et.type != NT_invtype) \
15461 inst.error = NULL; \
15462 return (neon_cvt_flavour_##C); \
15465 struct neon_type_el et
;
15466 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
15467 || rs
== NS_FF
) ? N_VFP
: 0;
15468 /* The instruction versions which take an immediate take one register
15469 argument, which is extended to the width of the full register. Thus the
15470 "source" and "destination" registers must have the same width. Hack that
15471 here by making the size equal to the key (wider, in this case) operand. */
15472 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
15476 return neon_cvt_flavour_invalid
;
15491 /* Neon-syntax VFP conversions. */
15494 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
15496 const char *opname
= 0;
15498 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
15499 || rs
== NS_FHI
|| rs
== NS_HFI
)
15501 /* Conversions with immediate bitshift. */
15502 const char *enc
[] =
15504 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15510 if (flavour
< (int) ARRAY_SIZE (enc
))
15512 opname
= enc
[flavour
];
15513 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15514 _("operands 0 and 1 must be the same register"));
15515 inst
.operands
[1] = inst
.operands
[2];
15516 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
15521 /* Conversions without bitshift. */
15522 const char *enc
[] =
15524 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15530 if (flavour
< (int) ARRAY_SIZE (enc
))
15531 opname
= enc
[flavour
];
15535 do_vfp_nsyn_opcode (opname
);
15537 /* ARMv8.2 fp16 VCVT instruction. */
15538 if (flavour
== neon_cvt_flavour_s32_f16
15539 || flavour
== neon_cvt_flavour_u32_f16
15540 || flavour
== neon_cvt_flavour_f16_u32
15541 || flavour
== neon_cvt_flavour_f16_s32
)
15542 do_scalar_fp16_v82_encode ();
15546 do_vfp_nsyn_cvtz (void)
15548 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
15549 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15550 const char *enc
[] =
15552 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15558 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
15559 do_vfp_nsyn_opcode (enc
[flavour
]);
15563 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
15564 enum neon_cvt_mode mode
)
15569 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15570 D register operands. */
15571 if (flavour
== neon_cvt_flavour_s32_f64
15572 || flavour
== neon_cvt_flavour_u32_f64
)
15573 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15576 if (flavour
== neon_cvt_flavour_s32_f16
15577 || flavour
== neon_cvt_flavour_u32_f16
)
15578 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
15581 set_it_insn_type (OUTSIDE_IT_INSN
);
15585 case neon_cvt_flavour_s32_f64
:
15589 case neon_cvt_flavour_s32_f32
:
15593 case neon_cvt_flavour_s32_f16
:
15597 case neon_cvt_flavour_u32_f64
:
15601 case neon_cvt_flavour_u32_f32
:
15605 case neon_cvt_flavour_u32_f16
:
15610 first_error (_("invalid instruction shape"));
15616 case neon_cvt_mode_a
: rm
= 0; break;
15617 case neon_cvt_mode_n
: rm
= 1; break;
15618 case neon_cvt_mode_p
: rm
= 2; break;
15619 case neon_cvt_mode_m
: rm
= 3; break;
15620 default: first_error (_("invalid rounding mode")); return;
15623 NEON_ENCODE (FPV8
, inst
);
15624 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
15625 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
15626 inst
.instruction
|= sz
<< 8;
15628 /* ARMv8.2 fp16 VCVT instruction. */
15629 if (flavour
== neon_cvt_flavour_s32_f16
15630 ||flavour
== neon_cvt_flavour_u32_f16
)
15631 do_scalar_fp16_v82_encode ();
15632 inst
.instruction
|= op
<< 7;
15633 inst
.instruction
|= rm
<< 16;
15634 inst
.instruction
|= 0xf0000000;
15635 inst
.is_neon
= TRUE
;
15639 do_neon_cvt_1 (enum neon_cvt_mode mode
)
15641 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
15642 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
15643 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
15645 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15647 if (flavour
== neon_cvt_flavour_invalid
)
15650 /* PR11109: Handle round-to-zero for VCVT conversions. */
15651 if (mode
== neon_cvt_mode_z
15652 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
15653 && (flavour
== neon_cvt_flavour_s16_f16
15654 || flavour
== neon_cvt_flavour_u16_f16
15655 || flavour
== neon_cvt_flavour_s32_f32
15656 || flavour
== neon_cvt_flavour_u32_f32
15657 || flavour
== neon_cvt_flavour_s32_f64
15658 || flavour
== neon_cvt_flavour_u32_f64
)
15659 && (rs
== NS_FD
|| rs
== NS_FF
))
15661 do_vfp_nsyn_cvtz ();
15665 /* ARMv8.2 fp16 VCVT conversions. */
15666 if (mode
== neon_cvt_mode_z
15667 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
15668 && (flavour
== neon_cvt_flavour_s32_f16
15669 || flavour
== neon_cvt_flavour_u32_f16
)
15672 do_vfp_nsyn_cvtz ();
15673 do_scalar_fp16_v82_encode ();
15677 /* VFP rather than Neon conversions. */
15678 if (flavour
>= neon_cvt_flavour_first_fp
)
15680 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15681 do_vfp_nsyn_cvt (rs
, flavour
);
15683 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15694 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
15695 0x0000100, 0x1000100, 0x0, 0x1000000};
15697 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15700 /* Fixed-point conversion with #0 immediate is encoded as an
15701 integer conversion. */
15702 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
15704 NEON_ENCODE (IMMED
, inst
);
15705 if (flavour
!= neon_cvt_flavour_invalid
)
15706 inst
.instruction
|= enctab
[flavour
];
15707 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15708 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15709 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15710 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15711 inst
.instruction
|= neon_quad (rs
) << 6;
15712 inst
.instruction
|= 1 << 21;
15713 if (flavour
< neon_cvt_flavour_s16_f16
)
15715 inst
.instruction
|= 1 << 21;
15716 immbits
= 32 - inst
.operands
[2].imm
;
15717 inst
.instruction
|= immbits
<< 16;
15721 inst
.instruction
|= 3 << 20;
15722 immbits
= 16 - inst
.operands
[2].imm
;
15723 inst
.instruction
|= immbits
<< 16;
15724 inst
.instruction
&= ~(1 << 9);
15727 neon_dp_fixup (&inst
);
15733 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
15735 NEON_ENCODE (FLOAT
, inst
);
15736 set_it_insn_type (OUTSIDE_IT_INSN
);
15738 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
15741 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15742 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15743 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15744 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15745 inst
.instruction
|= neon_quad (rs
) << 6;
15746 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
15747 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
15748 inst
.instruction
|= mode
<< 8;
15749 if (flavour
== neon_cvt_flavour_u16_f16
15750 || flavour
== neon_cvt_flavour_s16_f16
)
15751 /* Mask off the original size bits and reencode them. */
15752 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
15755 inst
.instruction
|= 0xfc000000;
15757 inst
.instruction
|= 0xf0000000;
15763 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
15764 0x100, 0x180, 0x0, 0x080};
15766 NEON_ENCODE (INTEGER
, inst
);
15768 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15771 if (flavour
!= neon_cvt_flavour_invalid
)
15772 inst
.instruction
|= enctab
[flavour
];
15774 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15775 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15776 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15777 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15778 inst
.instruction
|= neon_quad (rs
) << 6;
15779 if (flavour
>= neon_cvt_flavour_s16_f16
15780 && flavour
<= neon_cvt_flavour_f16_u16
)
15781 /* Half precision. */
15782 inst
.instruction
|= 1 << 18;
15784 inst
.instruction
|= 2 << 18;
15786 neon_dp_fixup (&inst
);
15791 /* Half-precision conversions for Advanced SIMD -- neon. */
15796 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
15798 as_bad (_("operand size must match register width"));
15803 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
15805 as_bad (_("operand size must match register width"));
15810 inst
.instruction
= 0x3b60600;
15812 inst
.instruction
= 0x3b60700;
15814 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15815 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15816 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15817 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15818 neon_dp_fixup (&inst
);
15822 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15823 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15824 do_vfp_nsyn_cvt (rs
, flavour
);
15826 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15831 do_neon_cvtr (void)
15833 do_neon_cvt_1 (neon_cvt_mode_x
);
15839 do_neon_cvt_1 (neon_cvt_mode_z
);
15843 do_neon_cvta (void)
15845 do_neon_cvt_1 (neon_cvt_mode_a
);
15849 do_neon_cvtn (void)
15851 do_neon_cvt_1 (neon_cvt_mode_n
);
15855 do_neon_cvtp (void)
15857 do_neon_cvt_1 (neon_cvt_mode_p
);
15861 do_neon_cvtm (void)
15863 do_neon_cvt_1 (neon_cvt_mode_m
);
15867 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
15870 mark_feature_used (&fpu_vfp_ext_armv8
);
15872 encode_arm_vfp_reg (inst
.operands
[0].reg
,
15873 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
15874 encode_arm_vfp_reg (inst
.operands
[1].reg
,
15875 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
15876 inst
.instruction
|= to
? 0x10000 : 0;
15877 inst
.instruction
|= t
? 0x80 : 0;
15878 inst
.instruction
|= is_double
? 0x100 : 0;
15879 do_vfp_cond_or_thumb ();
15883 do_neon_cvttb_1 (bfd_boolean t
)
15885 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
15886 NS_DF
, NS_DH
, NS_NULL
);
15890 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
15893 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
15895 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
15898 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
15900 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
15902 /* The VCVTB and VCVTT instructions with D-register operands
15903 don't work for SP only targets. */
15904 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15908 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
15910 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
15912 /* The VCVTB and VCVTT instructions with D-register operands
15913 don't work for SP only targets. */
15914 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15918 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
15925 do_neon_cvtb (void)
15927 do_neon_cvttb_1 (FALSE
);
15932 do_neon_cvtt (void)
15934 do_neon_cvttb_1 (TRUE
);
15938 neon_move_immediate (void)
15940 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
15941 struct neon_type_el et
= neon_check_type (2, rs
,
15942 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15943 unsigned immlo
, immhi
= 0, immbits
;
15944 int op
, cmode
, float_p
;
15946 constraint (et
.type
== NT_invtype
,
15947 _("operand size must be specified for immediate VMOV"));
15949 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
15950 op
= (inst
.instruction
& (1 << 5)) != 0;
15952 immlo
= inst
.operands
[1].imm
;
15953 if (inst
.operands
[1].regisimm
)
15954 immhi
= inst
.operands
[1].reg
;
15956 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
15957 _("immediate has bits set outside the operand size"));
15959 float_p
= inst
.operands
[1].immisfloat
;
15961 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
15962 et
.size
, et
.type
)) == FAIL
)
15964 /* Invert relevant bits only. */
15965 neon_invert_size (&immlo
, &immhi
, et
.size
);
15966 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15967 with one or the other; those cases are caught by
15968 neon_cmode_for_move_imm. */
15970 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
15971 &op
, et
.size
, et
.type
)) == FAIL
)
15973 first_error (_("immediate out of range"));
15978 inst
.instruction
&= ~(1 << 5);
15979 inst
.instruction
|= op
<< 5;
15981 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15982 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15983 inst
.instruction
|= neon_quad (rs
) << 6;
15984 inst
.instruction
|= cmode
<< 8;
15986 neon_write_immbits (immbits
);
15992 if (inst
.operands
[1].isreg
)
15994 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15996 NEON_ENCODE (INTEGER
, inst
);
15997 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15998 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15999 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16000 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16001 inst
.instruction
|= neon_quad (rs
) << 6;
16005 NEON_ENCODE (IMMED
, inst
);
16006 neon_move_immediate ();
16009 neon_dp_fixup (&inst
);
16012 /* Encode instructions of form:
16014 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16015 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16018 neon_mixed_length (struct neon_type_el et
, unsigned size
)
16020 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16021 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16022 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16023 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16024 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16025 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16026 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
16027 inst
.instruction
|= neon_logbits (size
) << 20;
16029 neon_dp_fixup (&inst
);
16033 do_neon_dyadic_long (void)
16035 /* FIXME: Type checking for lengthening op. */
16036 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16037 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16038 neon_mixed_length (et
, et
.size
);
16042 do_neon_abal (void)
16044 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16045 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16046 neon_mixed_length (et
, et
.size
);
16050 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
16052 if (inst
.operands
[2].isscalar
)
16054 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
16055 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
16056 NEON_ENCODE (SCALAR
, inst
);
16057 neon_mul_mac (et
, et
.type
== NT_unsigned
);
16061 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16062 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
16063 NEON_ENCODE (INTEGER
, inst
);
16064 neon_mixed_length (et
, et
.size
);
16069 do_neon_mac_maybe_scalar_long (void)
16071 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
16075 do_neon_dyadic_wide (void)
16077 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
16078 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16079 neon_mixed_length (et
, et
.size
);
16083 do_neon_dyadic_narrow (void)
16085 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16086 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
16087 /* Operand sign is unimportant, and the U bit is part of the opcode,
16088 so force the operand type to integer. */
16089 et
.type
= NT_integer
;
16090 neon_mixed_length (et
, et
.size
/ 2);
16094 do_neon_mul_sat_scalar_long (void)
16096 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
16100 do_neon_vmull (void)
16102 if (inst
.operands
[2].isscalar
)
16103 do_neon_mac_maybe_scalar_long ();
16106 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16107 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
16109 if (et
.type
== NT_poly
)
16110 NEON_ENCODE (POLY
, inst
);
16112 NEON_ENCODE (INTEGER
, inst
);
16114 /* For polynomial encoding the U bit must be zero, and the size must
16115 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16116 obviously, as 0b10). */
16119 /* Check we're on the correct architecture. */
16120 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
16122 _("Instruction form not available on this architecture.");
16127 neon_mixed_length (et
, et
.size
);
16134 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
16135 struct neon_type_el et
= neon_check_type (3, rs
,
16136 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16137 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
16139 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
16140 _("shift out of range"));
16141 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16142 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16143 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16144 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16145 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16146 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16147 inst
.instruction
|= neon_quad (rs
) << 6;
16148 inst
.instruction
|= imm
<< 8;
16150 neon_dp_fixup (&inst
);
16156 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16157 struct neon_type_el et
= neon_check_type (2, rs
,
16158 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16159 unsigned op
= (inst
.instruction
>> 7) & 3;
16160 /* N (width of reversed regions) is encoded as part of the bitmask. We
16161 extract it here to check the elements to be reversed are smaller.
16162 Otherwise we'd get a reserved instruction. */
16163 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
16164 gas_assert (elsize
!= 0);
16165 constraint (et
.size
>= elsize
,
16166 _("elements must be smaller than reversal region"));
16167 neon_two_same (neon_quad (rs
), 1, et
.size
);
16173 if (inst
.operands
[1].isscalar
)
16175 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
16176 struct neon_type_el et
= neon_check_type (2, rs
,
16177 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16178 unsigned sizebits
= et
.size
>> 3;
16179 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16180 int logsize
= neon_logbits (et
.size
);
16181 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
16183 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
16186 NEON_ENCODE (SCALAR
, inst
);
16187 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16188 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16189 inst
.instruction
|= LOW4 (dm
);
16190 inst
.instruction
|= HI1 (dm
) << 5;
16191 inst
.instruction
|= neon_quad (rs
) << 6;
16192 inst
.instruction
|= x
<< 17;
16193 inst
.instruction
|= sizebits
<< 16;
16195 neon_dp_fixup (&inst
);
16199 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
16200 struct neon_type_el et
= neon_check_type (2, rs
,
16201 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16202 /* Duplicate ARM register to lanes of vector. */
16203 NEON_ENCODE (ARMREG
, inst
);
16206 case 8: inst
.instruction
|= 0x400000; break;
16207 case 16: inst
.instruction
|= 0x000020; break;
16208 case 32: inst
.instruction
|= 0x000000; break;
16211 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16212 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
16213 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
16214 inst
.instruction
|= neon_quad (rs
) << 21;
16215 /* The encoding for this instruction is identical for the ARM and Thumb
16216 variants, except for the condition field. */
16217 do_vfp_cond_or_thumb ();
16221 /* VMOV has particularly many variations. It can be one of:
16222 0. VMOV<c><q> <Qd>, <Qm>
16223 1. VMOV<c><q> <Dd>, <Dm>
16224 (Register operations, which are VORR with Rm = Rn.)
16225 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16226 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16228 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16229 (ARM register to scalar.)
16230 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16231 (Two ARM registers to vector.)
16232 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16233 (Scalar to ARM register.)
16234 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16235 (Vector to two ARM registers.)
16236 8. VMOV.F32 <Sd>, <Sm>
16237 9. VMOV.F64 <Dd>, <Dm>
16238 (VFP register moves.)
16239 10. VMOV.F32 <Sd>, #imm
16240 11. VMOV.F64 <Dd>, #imm
16241 (VFP float immediate load.)
16242 12. VMOV <Rd>, <Sm>
16243 (VFP single to ARM reg.)
16244 13. VMOV <Sd>, <Rm>
16245 (ARM reg to VFP single.)
16246 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16247 (Two ARM regs to two VFP singles.)
16248 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16249 (Two VFP singles to two ARM regs.)
16251 These cases can be disambiguated using neon_select_shape, except cases 1/9
16252 and 3/11 which depend on the operand type too.
16254 All the encoded bits are hardcoded by this function.
16256 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16257 Cases 5, 7 may be used with VFPv2 and above.
16259 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16260 can specify a type where it doesn't make sense to, and is ignored). */
16265 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
16266 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
16267 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
16268 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
16269 struct neon_type_el et
;
16270 const char *ldconst
= 0;
16274 case NS_DD
: /* case 1/9. */
16275 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16276 /* It is not an error here if no type is given. */
16278 if (et
.type
== NT_float
&& et
.size
== 64)
16280 do_vfp_nsyn_opcode ("fcpyd");
16283 /* fall through. */
16285 case NS_QQ
: /* case 0/1. */
16287 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16289 /* The architecture manual I have doesn't explicitly state which
16290 value the U bit should have for register->register moves, but
16291 the equivalent VORR instruction has U = 0, so do that. */
16292 inst
.instruction
= 0x0200110;
16293 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16294 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16295 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16296 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16297 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16298 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16299 inst
.instruction
|= neon_quad (rs
) << 6;
16301 neon_dp_fixup (&inst
);
16305 case NS_DI
: /* case 3/11. */
16306 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16308 if (et
.type
== NT_float
&& et
.size
== 64)
16310 /* case 11 (fconstd). */
16311 ldconst
= "fconstd";
16312 goto encode_fconstd
;
16314 /* fall through. */
16316 case NS_QI
: /* case 2/3. */
16317 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16319 inst
.instruction
= 0x0800010;
16320 neon_move_immediate ();
16321 neon_dp_fixup (&inst
);
16324 case NS_SR
: /* case 4. */
16326 unsigned bcdebits
= 0;
16328 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
16329 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
16331 /* .<size> is optional here, defaulting to .32. */
16332 if (inst
.vectype
.elems
== 0
16333 && inst
.operands
[0].vectype
.type
== NT_invtype
16334 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16336 inst
.vectype
.el
[0].type
= NT_untyped
;
16337 inst
.vectype
.el
[0].size
= 32;
16338 inst
.vectype
.elems
= 1;
16341 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16342 logsize
= neon_logbits (et
.size
);
16344 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16346 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16347 && et
.size
!= 32, _(BAD_FPU
));
16348 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16349 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16353 case 8: bcdebits
= 0x8; break;
16354 case 16: bcdebits
= 0x1; break;
16355 case 32: bcdebits
= 0x0; break;
16359 bcdebits
|= x
<< logsize
;
16361 inst
.instruction
= 0xe000b10;
16362 do_vfp_cond_or_thumb ();
16363 inst
.instruction
|= LOW4 (dn
) << 16;
16364 inst
.instruction
|= HI1 (dn
) << 7;
16365 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16366 inst
.instruction
|= (bcdebits
& 3) << 5;
16367 inst
.instruction
|= (bcdebits
>> 2) << 21;
16371 case NS_DRR
: /* case 5 (fmdrr). */
16372 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16375 inst
.instruction
= 0xc400b10;
16376 do_vfp_cond_or_thumb ();
16377 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
16378 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
16379 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16380 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
16383 case NS_RS
: /* case 6. */
16386 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16387 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
16388 unsigned abcdebits
= 0;
16390 /* .<dt> is optional here, defaulting to .32. */
16391 if (inst
.vectype
.elems
== 0
16392 && inst
.operands
[0].vectype
.type
== NT_invtype
16393 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16395 inst
.vectype
.el
[0].type
= NT_untyped
;
16396 inst
.vectype
.el
[0].size
= 32;
16397 inst
.vectype
.elems
= 1;
16400 et
= neon_check_type (2, NS_NULL
,
16401 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
16402 logsize
= neon_logbits (et
.size
);
16404 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16406 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16407 && et
.size
!= 32, _(BAD_FPU
));
16408 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16409 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16413 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
16414 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
16415 case 32: abcdebits
= 0x00; break;
16419 abcdebits
|= x
<< logsize
;
16420 inst
.instruction
= 0xe100b10;
16421 do_vfp_cond_or_thumb ();
16422 inst
.instruction
|= LOW4 (dn
) << 16;
16423 inst
.instruction
|= HI1 (dn
) << 7;
16424 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16425 inst
.instruction
|= (abcdebits
& 3) << 5;
16426 inst
.instruction
|= (abcdebits
>> 2) << 21;
16430 case NS_RRD
: /* case 7 (fmrrd). */
16431 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16434 inst
.instruction
= 0xc500b10;
16435 do_vfp_cond_or_thumb ();
16436 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16437 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16438 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16439 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16442 case NS_FF
: /* case 8 (fcpys). */
16443 do_vfp_nsyn_opcode ("fcpys");
16447 case NS_FI
: /* case 10 (fconsts). */
16448 ldconst
= "fconsts";
16450 if (is_quarter_float (inst
.operands
[1].imm
))
16452 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
16453 do_vfp_nsyn_opcode (ldconst
);
16455 /* ARMv8.2 fp16 vmov.f16 instruction. */
16457 do_scalar_fp16_v82_encode ();
16460 first_error (_("immediate out of range"));
16464 case NS_RF
: /* case 12 (fmrs). */
16465 do_vfp_nsyn_opcode ("fmrs");
16466 /* ARMv8.2 fp16 vmov.f16 instruction. */
16468 do_scalar_fp16_v82_encode ();
16472 case NS_FR
: /* case 13 (fmsr). */
16473 do_vfp_nsyn_opcode ("fmsr");
16474 /* ARMv8.2 fp16 vmov.f16 instruction. */
16476 do_scalar_fp16_v82_encode ();
16479 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16480 (one of which is a list), but we have parsed four. Do some fiddling to
16481 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16483 case NS_RRFF
: /* case 14 (fmrrs). */
16484 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
16485 _("VFP registers must be adjacent"));
16486 inst
.operands
[2].imm
= 2;
16487 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16488 do_vfp_nsyn_opcode ("fmrrs");
16491 case NS_FFRR
: /* case 15 (fmsrr). */
16492 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
16493 _("VFP registers must be adjacent"));
16494 inst
.operands
[1] = inst
.operands
[2];
16495 inst
.operands
[2] = inst
.operands
[3];
16496 inst
.operands
[0].imm
= 2;
16497 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16498 do_vfp_nsyn_opcode ("fmsrr");
16502 /* neon_select_shape has determined that the instruction
16503 shape is wrong and has already set the error message. */
16512 do_neon_rshift_round_imm (void)
16514 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16515 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
16516 int imm
= inst
.operands
[2].imm
;
16518 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16521 inst
.operands
[2].present
= 0;
16526 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16527 _("immediate out of range for shift"));
16528 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
16533 do_neon_movhf (void)
16535 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
16536 constraint (rs
!= NS_HH
, _("invalid suffix"));
16538 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16541 do_vfp_sp_monadic ();
16544 inst
.instruction
|= 0xf0000000;
16548 do_neon_movl (void)
16550 struct neon_type_el et
= neon_check_type (2, NS_QD
,
16551 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16552 unsigned sizebits
= et
.size
>> 3;
16553 inst
.instruction
|= sizebits
<< 19;
16554 neon_two_same (0, et
.type
== NT_unsigned
, -1);
16560 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16561 struct neon_type_el et
= neon_check_type (2, rs
,
16562 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16563 NEON_ENCODE (INTEGER
, inst
);
16564 neon_two_same (neon_quad (rs
), 1, et
.size
);
16568 do_neon_zip_uzp (void)
16570 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16571 struct neon_type_el et
= neon_check_type (2, rs
,
16572 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16573 if (rs
== NS_DD
&& et
.size
== 32)
16575 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16576 inst
.instruction
= N_MNEM_vtrn
;
16580 neon_two_same (neon_quad (rs
), 1, et
.size
);
16584 do_neon_sat_abs_neg (void)
16586 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16587 struct neon_type_el et
= neon_check_type (2, rs
,
16588 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16589 neon_two_same (neon_quad (rs
), 1, et
.size
);
16593 do_neon_pair_long (void)
16595 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16596 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
16597 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16598 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
16599 neon_two_same (neon_quad (rs
), 1, et
.size
);
16603 do_neon_recip_est (void)
16605 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16606 struct neon_type_el et
= neon_check_type (2, rs
,
16607 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
16608 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16609 neon_two_same (neon_quad (rs
), 1, et
.size
);
16615 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16616 struct neon_type_el et
= neon_check_type (2, rs
,
16617 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16618 neon_two_same (neon_quad (rs
), 1, et
.size
);
16624 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16625 struct neon_type_el et
= neon_check_type (2, rs
,
16626 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
16627 neon_two_same (neon_quad (rs
), 1, et
.size
);
16633 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16634 struct neon_type_el et
= neon_check_type (2, rs
,
16635 N_EQK
| N_INT
, N_8
| N_KEY
);
16636 neon_two_same (neon_quad (rs
), 1, et
.size
);
16642 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16643 neon_two_same (neon_quad (rs
), 1, -1);
16647 do_neon_tbl_tbx (void)
16649 unsigned listlenbits
;
16650 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
16652 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
16654 first_error (_("bad list length for table lookup"));
16658 listlenbits
= inst
.operands
[1].imm
- 1;
16659 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16660 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16661 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16662 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16663 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16664 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16665 inst
.instruction
|= listlenbits
<< 8;
16667 neon_dp_fixup (&inst
);
16671 do_neon_ldm_stm (void)
16673 /* P, U and L bits are part of bitmask. */
16674 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
16675 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
16677 if (inst
.operands
[1].issingle
)
16679 do_vfp_nsyn_ldm_stm (is_dbmode
);
16683 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
16684 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16686 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
16687 _("register list must contain at least 1 and at most 16 "
16690 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
16691 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
16692 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16693 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
16695 inst
.instruction
|= offsetbits
;
16697 do_vfp_cond_or_thumb ();
16701 do_neon_ldr_str (void)
16703 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
16705 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16706 And is UNPREDICTABLE in thumb mode. */
16708 && inst
.operands
[1].reg
== REG_PC
16709 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
16712 inst
.error
= _("Use of PC here is UNPREDICTABLE");
16713 else if (warn_on_deprecated
)
16714 as_tsktsk (_("Use of PC here is deprecated"));
16717 if (inst
.operands
[0].issingle
)
16720 do_vfp_nsyn_opcode ("flds");
16722 do_vfp_nsyn_opcode ("fsts");
16724 /* ARMv8.2 vldr.16/vstr.16 instruction. */
16725 if (inst
.vectype
.el
[0].size
== 16)
16726 do_scalar_fp16_v82_encode ();
16731 do_vfp_nsyn_opcode ("fldd");
16733 do_vfp_nsyn_opcode ("fstd");
16737 /* "interleave" version also handles non-interleaving register VLD1/VST1
16741 do_neon_ld_st_interleave (void)
16743 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
16744 N_8
| N_16
| N_32
| N_64
);
16745 unsigned alignbits
= 0;
16747 /* The bits in this table go:
16748 0: register stride of one (0) or two (1)
16749 1,2: register list length, minus one (1, 2, 3, 4).
16750 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16751 We use -1 for invalid entries. */
16752 const int typetable
[] =
16754 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16755 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16756 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16757 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16761 if (et
.type
== NT_invtype
)
16764 if (inst
.operands
[1].immisalign
)
16765 switch (inst
.operands
[1].imm
>> 8)
16767 case 64: alignbits
= 1; break;
16769 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
16770 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16771 goto bad_alignment
;
16775 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16776 goto bad_alignment
;
16781 first_error (_("bad alignment"));
16785 inst
.instruction
|= alignbits
<< 4;
16786 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16788 /* Bits [4:6] of the immediate in a list specifier encode register stride
16789 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16790 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16791 up the right value for "type" in a table based on this value and the given
16792 list style, then stick it back. */
16793 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
16794 | (((inst
.instruction
>> 8) & 3) << 3);
16796 typebits
= typetable
[idx
];
16798 constraint (typebits
== -1, _("bad list type for instruction"));
16799 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
16800 _("bad element type for instruction"));
16802 inst
.instruction
&= ~0xf00;
16803 inst
.instruction
|= typebits
<< 8;
16806 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16807 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16808 otherwise. The variable arguments are a list of pairs of legal (size, align)
16809 values, terminated with -1. */
16812 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
16815 int result
= FAIL
, thissize
, thisalign
;
16817 if (!inst
.operands
[1].immisalign
)
16823 va_start (ap
, do_alignment
);
16827 thissize
= va_arg (ap
, int);
16828 if (thissize
== -1)
16830 thisalign
= va_arg (ap
, int);
16832 if (size
== thissize
&& align
== thisalign
)
16835 while (result
!= SUCCESS
);
16839 if (result
== SUCCESS
)
16842 first_error (_("unsupported alignment for instruction"));
16848 do_neon_ld_st_lane (void)
16850 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16851 int align_good
, do_alignment
= 0;
16852 int logsize
= neon_logbits (et
.size
);
16853 int align
= inst
.operands
[1].imm
>> 8;
16854 int n
= (inst
.instruction
>> 8) & 3;
16855 int max_el
= 64 / et
.size
;
16857 if (et
.type
== NT_invtype
)
16860 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
16861 _("bad list length"));
16862 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
16863 _("scalar index out of range"));
16864 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
16866 _("stride of 2 unavailable when element size is 8"));
16870 case 0: /* VLD1 / VST1. */
16871 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
16873 if (align_good
== FAIL
)
16877 unsigned alignbits
= 0;
16880 case 16: alignbits
= 0x1; break;
16881 case 32: alignbits
= 0x3; break;
16884 inst
.instruction
|= alignbits
<< 4;
16888 case 1: /* VLD2 / VST2. */
16889 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
16890 16, 32, 32, 64, -1);
16891 if (align_good
== FAIL
)
16894 inst
.instruction
|= 1 << 4;
16897 case 2: /* VLD3 / VST3. */
16898 constraint (inst
.operands
[1].immisalign
,
16899 _("can't use alignment with this instruction"));
16902 case 3: /* VLD4 / VST4. */
16903 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
16904 16, 64, 32, 64, 32, 128, -1);
16905 if (align_good
== FAIL
)
16909 unsigned alignbits
= 0;
16912 case 8: alignbits
= 0x1; break;
16913 case 16: alignbits
= 0x1; break;
16914 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
16917 inst
.instruction
|= alignbits
<< 4;
16924 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16925 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16926 inst
.instruction
|= 1 << (4 + logsize
);
16928 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
16929 inst
.instruction
|= logsize
<< 10;
16932 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16935 do_neon_ld_dup (void)
16937 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16938 int align_good
, do_alignment
= 0;
16940 if (et
.type
== NT_invtype
)
16943 switch ((inst
.instruction
>> 8) & 3)
16945 case 0: /* VLD1. */
16946 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
16947 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16948 &do_alignment
, 16, 16, 32, 32, -1);
16949 if (align_good
== FAIL
)
16951 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
16954 case 2: inst
.instruction
|= 1 << 5; break;
16955 default: first_error (_("bad list length")); return;
16957 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16960 case 1: /* VLD2. */
16961 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16962 &do_alignment
, 8, 16, 16, 32, 32, 64,
16964 if (align_good
== FAIL
)
16966 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
16967 _("bad list length"));
16968 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16969 inst
.instruction
|= 1 << 5;
16970 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16973 case 2: /* VLD3. */
16974 constraint (inst
.operands
[1].immisalign
,
16975 _("can't use alignment with this instruction"));
16976 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
16977 _("bad list length"));
16978 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16979 inst
.instruction
|= 1 << 5;
16980 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16983 case 3: /* VLD4. */
16985 int align
= inst
.operands
[1].imm
>> 8;
16986 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
16987 16, 64, 32, 64, 32, 128, -1);
16988 if (align_good
== FAIL
)
16990 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
16991 _("bad list length"));
16992 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16993 inst
.instruction
|= 1 << 5;
16994 if (et
.size
== 32 && align
== 128)
16995 inst
.instruction
|= 0x3 << 6;
16997 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17004 inst
.instruction
|= do_alignment
<< 4;
17007 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17008 apart from bits [11:4]. */
17011 do_neon_ldx_stx (void)
17013 if (inst
.operands
[1].isreg
)
17014 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
17016 switch (NEON_LANE (inst
.operands
[0].imm
))
17018 case NEON_INTERLEAVE_LANES
:
17019 NEON_ENCODE (INTERLV
, inst
);
17020 do_neon_ld_st_interleave ();
17023 case NEON_ALL_LANES
:
17024 NEON_ENCODE (DUP
, inst
);
17025 if (inst
.instruction
== N_INV
)
17027 first_error ("only loads support such operands");
17034 NEON_ENCODE (LANE
, inst
);
17035 do_neon_ld_st_lane ();
17038 /* L bit comes from bit mask. */
17039 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17040 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17041 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17043 if (inst
.operands
[1].postind
)
17045 int postreg
= inst
.operands
[1].imm
& 0xf;
17046 constraint (!inst
.operands
[1].immisreg
,
17047 _("post-index must be a register"));
17048 constraint (postreg
== 0xd || postreg
== 0xf,
17049 _("bad register for post-index"));
17050 inst
.instruction
|= postreg
;
17054 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
17055 constraint (inst
.reloc
.exp
.X_op
!= O_constant
17056 || inst
.reloc
.exp
.X_add_number
!= 0,
17059 if (inst
.operands
[1].writeback
)
17061 inst
.instruction
|= 0xd;
17064 inst
.instruction
|= 0xf;
17068 inst
.instruction
|= 0xf9000000;
17070 inst
.instruction
|= 0xf4000000;
17075 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
17077 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17078 D register operands. */
17079 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17080 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17083 NEON_ENCODE (FPV8
, inst
);
17085 if (rs
== NS_FFF
|| rs
== NS_HHH
)
17087 do_vfp_sp_dyadic ();
17089 /* ARMv8.2 fp16 instruction. */
17091 do_scalar_fp16_v82_encode ();
17094 do_vfp_dp_rd_rn_rm ();
17097 inst
.instruction
|= 0x100;
17099 inst
.instruction
|= 0xf0000000;
17105 set_it_insn_type (OUTSIDE_IT_INSN
);
17107 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
17108 first_error (_("invalid instruction shape"));
17114 set_it_insn_type (OUTSIDE_IT_INSN
);
17116 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
17119 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17122 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
17126 do_vrint_1 (enum neon_cvt_mode mode
)
17128 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
17129 struct neon_type_el et
;
17134 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17135 D register operands. */
17136 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17137 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17140 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
17142 if (et
.type
!= NT_invtype
)
17144 /* VFP encodings. */
17145 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
17146 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
17147 set_it_insn_type (OUTSIDE_IT_INSN
);
17149 NEON_ENCODE (FPV8
, inst
);
17150 if (rs
== NS_FF
|| rs
== NS_HH
)
17151 do_vfp_sp_monadic ();
17153 do_vfp_dp_rd_rm ();
17157 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
17158 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
17159 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
17160 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
17161 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
17162 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
17163 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
17167 inst
.instruction
|= (rs
== NS_DD
) << 8;
17168 do_vfp_cond_or_thumb ();
17170 /* ARMv8.2 fp16 vrint instruction. */
17172 do_scalar_fp16_v82_encode ();
17176 /* Neon encodings (or something broken...). */
17178 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
17180 if (et
.type
== NT_invtype
)
17183 set_it_insn_type (OUTSIDE_IT_INSN
);
17184 NEON_ENCODE (FLOAT
, inst
);
17186 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17189 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17190 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17191 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17192 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17193 inst
.instruction
|= neon_quad (rs
) << 6;
17194 /* Mask off the original size bits and reencode them. */
17195 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
17196 | neon_logbits (et
.size
) << 18);
17200 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
17201 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
17202 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
17203 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
17204 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
17205 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
17206 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
17211 inst
.instruction
|= 0xfc000000;
17213 inst
.instruction
|= 0xf0000000;
17220 do_vrint_1 (neon_cvt_mode_x
);
17226 do_vrint_1 (neon_cvt_mode_z
);
17232 do_vrint_1 (neon_cvt_mode_r
);
17238 do_vrint_1 (neon_cvt_mode_a
);
17244 do_vrint_1 (neon_cvt_mode_n
);
17250 do_vrint_1 (neon_cvt_mode_p
);
17256 do_vrint_1 (neon_cvt_mode_m
);
17259 /* Crypto v1 instructions. */
17261 do_crypto_2op_1 (unsigned elttype
, int op
)
17263 set_it_insn_type (OUTSIDE_IT_INSN
);
17265 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
17271 NEON_ENCODE (INTEGER
, inst
);
17272 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17273 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17274 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17275 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17277 inst
.instruction
|= op
<< 6;
17280 inst
.instruction
|= 0xfc000000;
17282 inst
.instruction
|= 0xf0000000;
17286 do_crypto_3op_1 (int u
, int op
)
17288 set_it_insn_type (OUTSIDE_IT_INSN
);
17290 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
17291 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
17296 NEON_ENCODE (INTEGER
, inst
);
17297 neon_three_same (1, u
, 8 << op
);
17303 do_crypto_2op_1 (N_8
, 0);
17309 do_crypto_2op_1 (N_8
, 1);
17315 do_crypto_2op_1 (N_8
, 2);
17321 do_crypto_2op_1 (N_8
, 3);
17327 do_crypto_3op_1 (0, 0);
17333 do_crypto_3op_1 (0, 1);
17339 do_crypto_3op_1 (0, 2);
17345 do_crypto_3op_1 (0, 3);
17351 do_crypto_3op_1 (1, 0);
17357 do_crypto_3op_1 (1, 1);
17361 do_sha256su1 (void)
17363 do_crypto_3op_1 (1, 2);
17369 do_crypto_2op_1 (N_32
, -1);
17375 do_crypto_2op_1 (N_32
, 0);
17379 do_sha256su0 (void)
17381 do_crypto_2op_1 (N_32
, 1);
17385 do_crc32_1 (unsigned int poly
, unsigned int sz
)
17387 unsigned int Rd
= inst
.operands
[0].reg
;
17388 unsigned int Rn
= inst
.operands
[1].reg
;
17389 unsigned int Rm
= inst
.operands
[2].reg
;
17391 set_it_insn_type (OUTSIDE_IT_INSN
);
17392 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
17393 inst
.instruction
|= LOW4 (Rn
) << 16;
17394 inst
.instruction
|= LOW4 (Rm
);
17395 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
17396 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
17398 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
17399 as_warn (UNPRED_REG ("r15"));
17400 if (thumb_mode
&& (Rd
== REG_SP
|| Rn
== REG_SP
|| Rm
== REG_SP
))
17401 as_warn (UNPRED_REG ("r13"));
17441 /* Overall per-instruction processing. */
17443 /* We need to be able to fix up arbitrary expressions in some statements.
17444 This is so that we can handle symbols that are an arbitrary distance from
17445 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17446 which returns part of an address in a form which will be valid for
17447 a data instruction. We do this by pushing the expression into a symbol
17448 in the expr_section, and creating a fix for that. */
17451 fix_new_arm (fragS
* frag
,
17465 /* Create an absolute valued symbol, so we have something to
17466 refer to in the object file. Unfortunately for us, gas's
17467 generic expression parsing will already have folded out
17468 any use of .set foo/.type foo %function that may have
17469 been used to set type information of the target location,
17470 that's being specified symbolically. We have to presume
17471 the user knows what they are doing. */
17475 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
17477 symbol
= symbol_find_or_make (name
);
17478 S_SET_SEGMENT (symbol
, absolute_section
);
17479 symbol_set_frag (symbol
, &zero_address_frag
);
17480 S_SET_VALUE (symbol
, exp
->X_add_number
);
17481 exp
->X_op
= O_symbol
;
17482 exp
->X_add_symbol
= symbol
;
17483 exp
->X_add_number
= 0;
17489 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
17490 (enum bfd_reloc_code_real
) reloc
);
17494 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
17495 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
17499 /* Mark whether the fix is to a THUMB instruction, or an ARM
17501 new_fix
->tc_fix_data
= thumb_mode
;
17504 /* Create a frg for an instruction requiring relaxation. */
17506 output_relax_insn (void)
17512 /* The size of the instruction is unknown, so tie the debug info to the
17513 start of the instruction. */
17514 dwarf2_emit_insn (0);
17516 switch (inst
.reloc
.exp
.X_op
)
17519 sym
= inst
.reloc
.exp
.X_add_symbol
;
17520 offset
= inst
.reloc
.exp
.X_add_number
;
17524 offset
= inst
.reloc
.exp
.X_add_number
;
17527 sym
= make_expr_symbol (&inst
.reloc
.exp
);
17531 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
17532 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
17533 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
17536 /* Write a 32-bit thumb instruction to buf. */
17538 put_thumb32_insn (char * buf
, unsigned long insn
)
17540 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
17541 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
17545 output_inst (const char * str
)
17551 as_bad ("%s -- `%s'", inst
.error
, str
);
17556 output_relax_insn ();
17559 if (inst
.size
== 0)
17562 to
= frag_more (inst
.size
);
17563 /* PR 9814: Record the thumb mode into the current frag so that we know
17564 what type of NOP padding to use, if necessary. We override any previous
17565 setting so that if the mode has changed then the NOPS that we use will
17566 match the encoding of the last instruction in the frag. */
17567 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
17569 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
17571 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
17572 put_thumb32_insn (to
, inst
.instruction
);
17574 else if (inst
.size
> INSN_SIZE
)
17576 gas_assert (inst
.size
== (2 * INSN_SIZE
));
17577 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
17578 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
17581 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
17583 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
17584 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
17585 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
17588 dwarf2_emit_insn (inst
.size
);
17592 output_it_inst (int cond
, int mask
, char * to
)
17594 unsigned long instruction
= 0xbf00;
17597 instruction
|= mask
;
17598 instruction
|= cond
<< 4;
17602 to
= frag_more (2);
17604 dwarf2_emit_insn (2);
17608 md_number_to_chars (to
, instruction
, 2);
17613 /* Tag values used in struct asm_opcode's tag field. */
17616 OT_unconditional
, /* Instruction cannot be conditionalized.
17617 The ARM condition field is still 0xE. */
17618 OT_unconditionalF
, /* Instruction cannot be conditionalized
17619 and carries 0xF in its ARM condition field. */
17620 OT_csuffix
, /* Instruction takes a conditional suffix. */
17621 OT_csuffixF
, /* Some forms of the instruction take a conditional
17622 suffix, others place 0xF where the condition field
17624 OT_cinfix3
, /* Instruction takes a conditional infix,
17625 beginning at character index 3. (In
17626 unified mode, it becomes a suffix.) */
17627 OT_cinfix3_deprecated
, /* The same as OT_cinfix3. This is used for
17628 tsts, cmps, cmns, and teqs. */
17629 OT_cinfix3_legacy
, /* Legacy instruction takes a conditional infix at
17630 character index 3, even in unified mode. Used for
17631 legacy instructions where suffix and infix forms
17632 may be ambiguous. */
17633 OT_csuf_or_in3
, /* Instruction takes either a conditional
17634 suffix or an infix at character index 3. */
17635 OT_odd_infix_unc
, /* This is the unconditional variant of an
17636 instruction that takes a conditional infix
17637 at an unusual position. In unified mode,
17638 this variant will accept a suffix. */
17639 OT_odd_infix_0
/* Values greater than or equal to OT_odd_infix_0
17640 are the conditional variants of instructions that
17641 take conditional infixes in unusual positions.
17642 The infix appears at character index
17643 (tag - OT_odd_infix_0). These are not accepted
17644 in unified mode. */
17647 /* Subroutine of md_assemble, responsible for looking up the primary
17648 opcode from the mnemonic the user wrote. STR points to the
17649 beginning of the mnemonic.
17651 This is not simply a hash table lookup, because of conditional
17652 variants. Most instructions have conditional variants, which are
17653 expressed with a _conditional affix_ to the mnemonic. If we were
17654 to encode each conditional variant as a literal string in the opcode
17655 table, it would have approximately 20,000 entries.
17657 Most mnemonics take this affix as a suffix, and in unified syntax,
17658 'most' is upgraded to 'all'. However, in the divided syntax, some
17659 instructions take the affix as an infix, notably the s-variants of
17660 the arithmetic instructions. Of those instructions, all but six
17661 have the infix appear after the third character of the mnemonic.
17663 Accordingly, the algorithm for looking up primary opcodes given
17666 1. Look up the identifier in the opcode table.
17667 If we find a match, go to step U.
17669 2. Look up the last two characters of the identifier in the
17670 conditions table. If we find a match, look up the first N-2
17671 characters of the identifier in the opcode table. If we
17672 find a match, go to step CE.
17674 3. Look up the fourth and fifth characters of the identifier in
17675 the conditions table. If we find a match, extract those
17676 characters from the identifier, and look up the remaining
17677 characters in the opcode table. If we find a match, go
17682 U. Examine the tag field of the opcode structure, in case this is
17683 one of the six instructions with its conditional infix in an
17684 unusual place. If it is, the tag tells us where to find the
17685 infix; look it up in the conditions table and set inst.cond
17686 accordingly. Otherwise, this is an unconditional instruction.
17687 Again set inst.cond accordingly. Return the opcode structure.
17689 CE. Examine the tag field to make sure this is an instruction that
17690 should receive a conditional suffix. If it is not, fail.
17691 Otherwise, set inst.cond from the suffix we already looked up,
17692 and return the opcode structure.
17694 CM. Examine the tag field to make sure this is an instruction that
17695 should receive a conditional infix after the third character.
17696 If it is not, fail. Otherwise, undo the edits to the current
17697 line of input and proceed as for case CE. */
17699 static const struct asm_opcode
*
17700 opcode_lookup (char **str
)
17704 const struct asm_opcode
*opcode
;
17705 const struct asm_cond
*cond
;
17708 /* Scan up to the end of the mnemonic, which must end in white space,
17709 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17710 for (base
= end
= *str
; *end
!= '\0'; end
++)
17711 if (*end
== ' ' || *end
== '.')
17717 /* Handle a possible width suffix and/or Neon type suffix. */
17722 /* The .w and .n suffixes are only valid if the unified syntax is in
17724 if (unified_syntax
&& end
[1] == 'w')
17726 else if (unified_syntax
&& end
[1] == 'n')
17731 inst
.vectype
.elems
= 0;
17733 *str
= end
+ offset
;
17735 if (end
[offset
] == '.')
17737 /* See if we have a Neon type suffix (possible in either unified or
17738 non-unified ARM syntax mode). */
17739 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
17742 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
17748 /* Look for unaffixed or special-case affixed mnemonic. */
17749 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17754 if (opcode
->tag
< OT_odd_infix_0
)
17756 inst
.cond
= COND_ALWAYS
;
17760 if (warn_on_deprecated
&& unified_syntax
)
17761 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17762 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
17763 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17766 inst
.cond
= cond
->value
;
17770 /* Cannot have a conditional suffix on a mnemonic of less than two
17772 if (end
- base
< 3)
17775 /* Look for suffixed mnemonic. */
17777 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17778 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17780 if (opcode
&& cond
)
17783 switch (opcode
->tag
)
17785 case OT_cinfix3_legacy
:
17786 /* Ignore conditional suffixes matched on infix only mnemonics. */
17790 case OT_cinfix3_deprecated
:
17791 case OT_odd_infix_unc
:
17792 if (!unified_syntax
)
17794 /* Fall through. */
17798 case OT_csuf_or_in3
:
17799 inst
.cond
= cond
->value
;
17802 case OT_unconditional
:
17803 case OT_unconditionalF
:
17805 inst
.cond
= cond
->value
;
17808 /* Delayed diagnostic. */
17809 inst
.error
= BAD_COND
;
17810 inst
.cond
= COND_ALWAYS
;
17819 /* Cannot have a usual-position infix on a mnemonic of less than
17820 six characters (five would be a suffix). */
17821 if (end
- base
< 6)
17824 /* Look for infixed mnemonic in the usual position. */
17826 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17830 memcpy (save
, affix
, 2);
17831 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
17832 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17834 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
17835 memcpy (affix
, save
, 2);
17838 && (opcode
->tag
== OT_cinfix3
17839 || opcode
->tag
== OT_cinfix3_deprecated
17840 || opcode
->tag
== OT_csuf_or_in3
17841 || opcode
->tag
== OT_cinfix3_legacy
))
17844 if (warn_on_deprecated
&& unified_syntax
17845 && (opcode
->tag
== OT_cinfix3
17846 || opcode
->tag
== OT_cinfix3_deprecated
))
17847 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17849 inst
.cond
= cond
->value
;
17856 /* This function generates an initial IT instruction, leaving its block
17857 virtually open for the new instructions. Eventually,
17858 the mask will be updated by now_it_add_mask () each time
17859 a new instruction needs to be included in the IT block.
17860 Finally, the block is closed with close_automatic_it_block ().
17861 The block closure can be requested either from md_assemble (),
17862 a tencode (), or due to a label hook. */
17865 new_automatic_it_block (int cond
)
17867 now_it
.state
= AUTOMATIC_IT_BLOCK
;
17868 now_it
.mask
= 0x18;
17870 now_it
.block_length
= 1;
17871 mapping_state (MAP_THUMB
);
17872 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
17873 now_it
.warn_deprecated
= FALSE
;
17874 now_it
.insn_cond
= TRUE
;
17877 /* Close an automatic IT block.
17878 See comments in new_automatic_it_block (). */
17881 close_automatic_it_block (void)
17883 now_it
.mask
= 0x10;
17884 now_it
.block_length
= 0;
17887 /* Update the mask of the current automatically-generated IT
17888 instruction. See comments in new_automatic_it_block (). */
17891 now_it_add_mask (int cond
)
17893 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17894 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17895 | ((bitvalue) << (nbit)))
17896 const int resulting_bit
= (cond
& 1);
17898 now_it
.mask
&= 0xf;
17899 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17901 (5 - now_it
.block_length
));
17902 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17904 ((5 - now_it
.block_length
) - 1) );
17905 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
17908 #undef SET_BIT_VALUE
17911 /* The IT blocks handling machinery is accessed through the these functions:
17912 it_fsm_pre_encode () from md_assemble ()
17913 set_it_insn_type () optional, from the tencode functions
17914 set_it_insn_type_last () ditto
17915 in_it_block () ditto
17916 it_fsm_post_encode () from md_assemble ()
17917 force_automatic_it_block_close () from label handling functions
17920 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17921 initializing the IT insn type with a generic initial value depending
17922 on the inst.condition.
17923 2) During the tencode function, two things may happen:
17924 a) The tencode function overrides the IT insn type by
17925 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17926 b) The tencode function queries the IT block state by
17927 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17929 Both set_it_insn_type and in_it_block run the internal FSM state
17930 handling function (handle_it_state), because: a) setting the IT insn
17931 type may incur in an invalid state (exiting the function),
17932 and b) querying the state requires the FSM to be updated.
17933 Specifically we want to avoid creating an IT block for conditional
17934 branches, so it_fsm_pre_encode is actually a guess and we can't
17935 determine whether an IT block is required until the tencode () routine
17936 has decided what type of instruction this actually is.
17937 Because of this, if set_it_insn_type and in_it_block have to be used,
17938 set_it_insn_type has to be called first.
17940 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17941 determines the insn IT type depending on the inst.cond code.
17942 When a tencode () routine encodes an instruction that can be
17943 either outside an IT block, or, in the case of being inside, has to be
17944 the last one, set_it_insn_type_last () will determine the proper
17945 IT instruction type based on the inst.cond code. Otherwise,
17946 set_it_insn_type can be called for overriding that logic or
17947 for covering other cases.
17949 Calling handle_it_state () may not transition the IT block state to
17950 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17951 still queried. Instead, if the FSM determines that the state should
17952 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17953 after the tencode () function: that's what it_fsm_post_encode () does.
17955 Since in_it_block () calls the state handling function to get an
17956 updated state, an error may occur (due to invalid insns combination).
17957 In that case, inst.error is set.
17958 Therefore, inst.error has to be checked after the execution of
17959 the tencode () routine.
17961 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17962 any pending state change (if any) that didn't take place in
17963 handle_it_state () as explained above. */
17966 it_fsm_pre_encode (void)
17968 if (inst
.cond
!= COND_ALWAYS
)
17969 inst
.it_insn_type
= INSIDE_IT_INSN
;
17971 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
17973 now_it
.state_handled
= 0;
17976 /* IT state FSM handling function. */
17979 handle_it_state (void)
17981 now_it
.state_handled
= 1;
17982 now_it
.insn_cond
= FALSE
;
17984 switch (now_it
.state
)
17986 case OUTSIDE_IT_BLOCK
:
17987 switch (inst
.it_insn_type
)
17989 case OUTSIDE_IT_INSN
:
17992 case INSIDE_IT_INSN
:
17993 case INSIDE_IT_LAST_INSN
:
17994 if (thumb_mode
== 0)
17997 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
17998 as_tsktsk (_("Warning: conditional outside an IT block"\
18003 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
18004 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
18006 /* Automatically generate the IT instruction. */
18007 new_automatic_it_block (inst
.cond
);
18008 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
18009 close_automatic_it_block ();
18013 inst
.error
= BAD_OUT_IT
;
18019 case IF_INSIDE_IT_LAST_INSN
:
18020 case NEUTRAL_IT_INSN
:
18024 now_it
.state
= MANUAL_IT_BLOCK
;
18025 now_it
.block_length
= 0;
18030 case AUTOMATIC_IT_BLOCK
:
18031 /* Three things may happen now:
18032 a) We should increment current it block size;
18033 b) We should close current it block (closing insn or 4 insns);
18034 c) We should close current it block and start a new one (due
18035 to incompatible conditions or
18036 4 insns-length block reached). */
18038 switch (inst
.it_insn_type
)
18040 case OUTSIDE_IT_INSN
:
18041 /* The closure of the block shall happen immediatelly,
18042 so any in_it_block () call reports the block as closed. */
18043 force_automatic_it_block_close ();
18046 case INSIDE_IT_INSN
:
18047 case INSIDE_IT_LAST_INSN
:
18048 case IF_INSIDE_IT_LAST_INSN
:
18049 now_it
.block_length
++;
18051 if (now_it
.block_length
> 4
18052 || !now_it_compatible (inst
.cond
))
18054 force_automatic_it_block_close ();
18055 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
18056 new_automatic_it_block (inst
.cond
);
18060 now_it
.insn_cond
= TRUE
;
18061 now_it_add_mask (inst
.cond
);
18064 if (now_it
.state
== AUTOMATIC_IT_BLOCK
18065 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
18066 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
18067 close_automatic_it_block ();
18070 case NEUTRAL_IT_INSN
:
18071 now_it
.block_length
++;
18072 now_it
.insn_cond
= TRUE
;
18074 if (now_it
.block_length
> 4)
18075 force_automatic_it_block_close ();
18077 now_it_add_mask (now_it
.cc
& 1);
18081 close_automatic_it_block ();
18082 now_it
.state
= MANUAL_IT_BLOCK
;
18087 case MANUAL_IT_BLOCK
:
18089 /* Check conditional suffixes. */
18090 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
18093 now_it
.mask
&= 0x1f;
18094 is_last
= (now_it
.mask
== 0x10);
18095 now_it
.insn_cond
= TRUE
;
18097 switch (inst
.it_insn_type
)
18099 case OUTSIDE_IT_INSN
:
18100 inst
.error
= BAD_NOT_IT
;
18103 case INSIDE_IT_INSN
:
18104 if (cond
!= inst
.cond
)
18106 inst
.error
= BAD_IT_COND
;
18111 case INSIDE_IT_LAST_INSN
:
18112 case IF_INSIDE_IT_LAST_INSN
:
18113 if (cond
!= inst
.cond
)
18115 inst
.error
= BAD_IT_COND
;
18120 inst
.error
= BAD_BRANCH
;
18125 case NEUTRAL_IT_INSN
:
18126 /* The BKPT instruction is unconditional even in an IT block. */
18130 inst
.error
= BAD_IT_IT
;
18140 struct depr_insn_mask
18142 unsigned long pattern
;
18143 unsigned long mask
;
18144 const char* description
;
18147 /* List of 16-bit instruction patterns deprecated in an IT block in
18149 static const struct depr_insn_mask depr_it_insns
[] = {
18150 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
18151 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
18152 { 0xa000, 0xb800, N_("ADR") },
18153 { 0x4800, 0xf800, N_("Literal loads") },
18154 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
18155 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
18156 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
18157 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
18158 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
18163 it_fsm_post_encode (void)
18167 if (!now_it
.state_handled
)
18168 handle_it_state ();
18170 if (now_it
.insn_cond
18171 && !now_it
.warn_deprecated
18172 && warn_on_deprecated
18173 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
18175 if (inst
.instruction
>= 0x10000)
18177 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18178 "deprecated in ARMv8"));
18179 now_it
.warn_deprecated
= TRUE
;
18183 const struct depr_insn_mask
*p
= depr_it_insns
;
18185 while (p
->mask
!= 0)
18187 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
18189 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18190 "of the following class are deprecated in ARMv8: "
18191 "%s"), p
->description
);
18192 now_it
.warn_deprecated
= TRUE
;
18200 if (now_it
.block_length
> 1)
18202 as_tsktsk (_("IT blocks containing more than one conditional "
18203 "instruction are deprecated in ARMv8"));
18204 now_it
.warn_deprecated
= TRUE
;
18208 is_last
= (now_it
.mask
== 0x10);
18211 now_it
.state
= OUTSIDE_IT_BLOCK
;
18217 force_automatic_it_block_close (void)
18219 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
18221 close_automatic_it_block ();
18222 now_it
.state
= OUTSIDE_IT_BLOCK
;
18230 if (!now_it
.state_handled
)
18231 handle_it_state ();
18233 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
18236 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18237 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18238 here, hence the "known" in the function name. */
18241 known_t32_only_insn (const struct asm_opcode
*opcode
)
18243 /* Original Thumb-1 wide instruction. */
18244 if (opcode
->tencode
== do_t_blx
18245 || opcode
->tencode
== do_t_branch23
18246 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
18247 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
18250 /* Wide-only instruction added to ARMv8-M Baseline. */
18251 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
18252 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
18253 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
18254 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
18260 /* Whether wide instruction variant can be used if available for a valid OPCODE
18264 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
18266 if (known_t32_only_insn (opcode
))
18269 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18270 of variant T3 of B.W is checked in do_t_branch. */
18271 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
18272 && opcode
->tencode
== do_t_branch
)
18275 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
18276 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
18277 && opcode
->tencode
== do_t_mov_cmp
18278 /* Make sure CMP instruction is not affected. */
18279 && opcode
->aencode
== do_mov
)
18282 /* Wide instruction variants of all instructions with narrow *and* wide
18283 variants become available with ARMv6t2. Other opcodes are either
18284 narrow-only or wide-only and are thus available if OPCODE is valid. */
18285 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
18288 /* OPCODE with narrow only instruction variant or wide variant not
18294 md_assemble (char *str
)
18297 const struct asm_opcode
* opcode
;
18299 /* Align the previous label if needed. */
18300 if (last_label_seen
!= NULL
)
18302 symbol_set_frag (last_label_seen
, frag_now
);
18303 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
18304 S_SET_SEGMENT (last_label_seen
, now_seg
);
18307 memset (&inst
, '\0', sizeof (inst
));
18308 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
18310 opcode
= opcode_lookup (&p
);
18313 /* It wasn't an instruction, but it might be a register alias of
18314 the form alias .req reg, or a Neon .dn/.qn directive. */
18315 if (! create_register_alias (str
, p
)
18316 && ! create_neon_reg_alias (str
, p
))
18317 as_bad (_("bad instruction `%s'"), str
);
18322 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
18323 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
18325 /* The value which unconditional instructions should have in place of the
18326 condition field. */
18327 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
18331 arm_feature_set variant
;
18333 variant
= cpu_variant
;
18334 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
18335 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
18336 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
18337 /* Check that this instruction is supported for this CPU. */
18338 if (!opcode
->tvariant
18339 || (thumb_mode
== 1
18340 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
18342 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
18345 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
18346 && opcode
->tencode
!= do_t_branch
)
18348 as_bad (_("Thumb does not support conditional execution"));
18352 /* Two things are addressed here:
18353 1) Implicit require narrow instructions on Thumb-1.
18354 This avoids relaxation accidentally introducing Thumb-2
18356 2) Reject wide instructions in non Thumb-2 cores.
18358 Only instructions with narrow and wide variants need to be handled
18359 but selecting all non wide-only instructions is easier. */
18360 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
18361 && !t32_insn_ok (variant
, opcode
))
18363 if (inst
.size_req
== 0)
18365 else if (inst
.size_req
== 4)
18367 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
18368 as_bad (_("selected processor does not support 32bit wide "
18369 "variant of instruction `%s'"), str
);
18371 as_bad (_("selected processor does not support `%s' in "
18372 "Thumb-2 mode"), str
);
18377 inst
.instruction
= opcode
->tvalue
;
18379 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
18381 /* Prepare the it_insn_type for those encodings that don't set
18383 it_fsm_pre_encode ();
18385 opcode
->tencode ();
18387 it_fsm_post_encode ();
18390 if (!(inst
.error
|| inst
.relax
))
18392 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
18393 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
18394 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
18396 as_bad (_("cannot honor width suffix -- `%s'"), str
);
18401 /* Something has gone badly wrong if we try to relax a fixed size
18403 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
18405 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18406 *opcode
->tvariant
);
18407 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
18408 set those bits when Thumb-2 32-bit instructions are seen. The impact
18409 of relaxable instructions will be considered later after we finish all
18411 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
18412 variant
= arm_arch_none
;
18414 variant
= cpu_variant
;
18415 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
18416 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18419 check_neon_suffixes
;
18423 mapping_state (MAP_THUMB
);
18426 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
18430 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
18431 is_bx
= (opcode
->aencode
== do_bx
);
18433 /* Check that this instruction is supported for this CPU. */
18434 if (!(is_bx
&& fix_v4bx
)
18435 && !(opcode
->avariant
&&
18436 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
18438 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
18443 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
18447 inst
.instruction
= opcode
->avalue
;
18448 if (opcode
->tag
== OT_unconditionalF
)
18449 inst
.instruction
|= 0xFU
<< 28;
18451 inst
.instruction
|= inst
.cond
<< 28;
18452 inst
.size
= INSN_SIZE
;
18453 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
18455 it_fsm_pre_encode ();
18456 opcode
->aencode ();
18457 it_fsm_post_encode ();
18459 /* Arm mode bx is marked as both v4T and v5 because it's still required
18460 on a hypothetical non-thumb v5 core. */
18462 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
18464 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
18465 *opcode
->avariant
);
18467 check_neon_suffixes
;
18471 mapping_state (MAP_ARM
);
18476 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18484 check_it_blocks_finished (void)
18489 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
18490 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
18491 == MANUAL_IT_BLOCK
)
18493 as_warn (_("section '%s' finished with an open IT block."),
18497 if (now_it
.state
== MANUAL_IT_BLOCK
)
18498 as_warn (_("file finished with an open IT block."));
18502 /* Various frobbings of labels and their addresses. */
18505 arm_start_line_hook (void)
18507 last_label_seen
= NULL
;
18511 arm_frob_label (symbolS
* sym
)
18513 last_label_seen
= sym
;
18515 ARM_SET_THUMB (sym
, thumb_mode
);
18517 #if defined OBJ_COFF || defined OBJ_ELF
18518 ARM_SET_INTERWORK (sym
, support_interwork
);
18521 force_automatic_it_block_close ();
18523 /* Note - do not allow local symbols (.Lxxx) to be labelled
18524 as Thumb functions. This is because these labels, whilst
18525 they exist inside Thumb code, are not the entry points for
18526 possible ARM->Thumb calls. Also, these labels can be used
18527 as part of a computed goto or switch statement. eg gcc
18528 can generate code that looks like this:
18530 ldr r2, [pc, .Laaa]
18540 The first instruction loads the address of the jump table.
18541 The second instruction converts a table index into a byte offset.
18542 The third instruction gets the jump address out of the table.
18543 The fourth instruction performs the jump.
18545 If the address stored at .Laaa is that of a symbol which has the
18546 Thumb_Func bit set, then the linker will arrange for this address
18547 to have the bottom bit set, which in turn would mean that the
18548 address computation performed by the third instruction would end
18549 up with the bottom bit set. Since the ARM is capable of unaligned
18550 word loads, the instruction would then load the incorrect address
18551 out of the jump table, and chaos would ensue. */
18552 if (label_is_thumb_function_name
18553 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
18554 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
18556 /* When the address of a Thumb function is taken the bottom
18557 bit of that address should be set. This will allow
18558 interworking between Arm and Thumb functions to work
18561 THUMB_SET_FUNC (sym
, 1);
18563 label_is_thumb_function_name
= FALSE
;
18566 dwarf2_emit_label (sym
);
18570 arm_data_in_code (void)
18572 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
18574 *input_line_pointer
= '/';
18575 input_line_pointer
+= 5;
18576 *input_line_pointer
= 0;
18584 arm_canonicalize_symbol_name (char * name
)
18588 if (thumb_mode
&& (len
= strlen (name
)) > 5
18589 && streq (name
+ len
- 5, "/data"))
18590 *(name
+ len
- 5) = 0;
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* One register-table entry: name string, encoded number, type, built-in.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Entry whose name is the prefix P followed by the number N.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM, but the encoded value is doubled (used for Q registers,
   which occupy two D registers each).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutive numbered registers 0..15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The high half, 16..31 (e.g. s16-s31, d16-d31).  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen registers with doubled encodings (Neon Q registers).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Banked LR/SP/SPSR triple for one processor mode, upper and lower
   case; BASE selects the banked-register encoding slot.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18626 static const struct reg_entry reg_names
[] =
18628 /* ARM integer registers. */
18629 REGSET(r
, RN
), REGSET(R
, RN
),
18631 /* ATPCS synonyms. */
18632 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
18633 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
18634 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
18636 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
18637 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
18638 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
18640 /* Well-known aliases. */
18641 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
18642 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
18644 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
18645 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
18647 /* Coprocessor numbers. */
18648 REGSET(p
, CP
), REGSET(P
, CP
),
18650 /* Coprocessor register numbers. The "cr" variants are for backward
18652 REGSET(c
, CN
), REGSET(C
, CN
),
18653 REGSET(cr
, CN
), REGSET(CR
, CN
),
18655 /* ARM banked registers. */
18656 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
18657 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
18658 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
18659 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
18660 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
18661 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
18662 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
18664 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
18665 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
18666 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
18667 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
18668 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
18669 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
18670 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
18671 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
18673 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
18674 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
18675 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
18676 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
18677 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
18678 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
18679 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
18680 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18681 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18683 /* FPA registers. */
18684 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
18685 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
18687 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
18688 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
18690 /* VFP SP registers. */
18691 REGSET(s
,VFS
), REGSET(S
,VFS
),
18692 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
18694 /* VFP DP Registers. */
18695 REGSET(d
,VFD
), REGSET(D
,VFD
),
18696 /* Extra Neon DP registers. */
18697 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
18699 /* Neon QP registers. */
18700 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
18702 /* VFP control registers. */
18703 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
18704 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
18705 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
18706 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
18707 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
18708 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
18710 /* Maverick DSP coprocessor registers. */
18711 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
18712 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
18714 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
18715 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
18716 REGDEF(dspsc
,0,DSPSC
),
18718 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
18719 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
18720 REGDEF(DSPSC
,0,DSPSC
),
18722 /* iWMMXt data registers - p0, c0-15. */
18723 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
18725 /* iWMMXt control registers - p1, c0-3. */
18726 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
18727 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
18728 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
18729 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
18731 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18732 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
18733 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
18734 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
18735 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
18737 /* XScale accumulator registers. */
18738 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
18744 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18745 within psr_required_here. */
18746 static const struct asm_psr psrs
[] =
18748 /* Backward compatibility notation. Note that "all" is no longer
18749 truly all possible PSR bits. */
18750 {"all", PSR_c
| PSR_f
},
18754 /* Individual flags. */
18760 /* Combinations of flags. */
18761 {"fs", PSR_f
| PSR_s
},
18762 {"fx", PSR_f
| PSR_x
},
18763 {"fc", PSR_f
| PSR_c
},
18764 {"sf", PSR_s
| PSR_f
},
18765 {"sx", PSR_s
| PSR_x
},
18766 {"sc", PSR_s
| PSR_c
},
18767 {"xf", PSR_x
| PSR_f
},
18768 {"xs", PSR_x
| PSR_s
},
18769 {"xc", PSR_x
| PSR_c
},
18770 {"cf", PSR_c
| PSR_f
},
18771 {"cs", PSR_c
| PSR_s
},
18772 {"cx", PSR_c
| PSR_x
},
18773 {"fsx", PSR_f
| PSR_s
| PSR_x
},
18774 {"fsc", PSR_f
| PSR_s
| PSR_c
},
18775 {"fxs", PSR_f
| PSR_x
| PSR_s
},
18776 {"fxc", PSR_f
| PSR_x
| PSR_c
},
18777 {"fcs", PSR_f
| PSR_c
| PSR_s
},
18778 {"fcx", PSR_f
| PSR_c
| PSR_x
},
18779 {"sfx", PSR_s
| PSR_f
| PSR_x
},
18780 {"sfc", PSR_s
| PSR_f
| PSR_c
},
18781 {"sxf", PSR_s
| PSR_x
| PSR_f
},
18782 {"sxc", PSR_s
| PSR_x
| PSR_c
},
18783 {"scf", PSR_s
| PSR_c
| PSR_f
},
18784 {"scx", PSR_s
| PSR_c
| PSR_x
},
18785 {"xfs", PSR_x
| PSR_f
| PSR_s
},
18786 {"xfc", PSR_x
| PSR_f
| PSR_c
},
18787 {"xsf", PSR_x
| PSR_s
| PSR_f
},
18788 {"xsc", PSR_x
| PSR_s
| PSR_c
},
18789 {"xcf", PSR_x
| PSR_c
| PSR_f
},
18790 {"xcs", PSR_x
| PSR_c
| PSR_s
},
18791 {"cfs", PSR_c
| PSR_f
| PSR_s
},
18792 {"cfx", PSR_c
| PSR_f
| PSR_x
},
18793 {"csf", PSR_c
| PSR_s
| PSR_f
},
18794 {"csx", PSR_c
| PSR_s
| PSR_x
},
18795 {"cxf", PSR_c
| PSR_x
| PSR_f
},
18796 {"cxs", PSR_c
| PSR_x
| PSR_s
},
18797 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
18798 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
18799 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
18800 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
18801 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
18802 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
18803 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
18804 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
18805 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
18806 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
18807 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
18808 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
18809 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
18810 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
18811 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
18812 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
18813 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
18814 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
18815 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
18816 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
18817 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
18818 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
18819 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
18820 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
18823 /* Table of V7M psr names. */
18824 static const struct asm_psr v7m_psrs
[] =
18826 {"apsr", 0x0 }, {"APSR", 0x0 },
18827 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
18828 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
18829 {"psr", 0x3 }, {"PSR", 0x3 },
18830 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
18831 {"ipsr", 0x5 }, {"IPSR", 0x5 },
18832 {"epsr", 0x6 }, {"EPSR", 0x6 },
18833 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
18834 {"msp", 0x8 }, {"MSP", 0x8 },
18835 {"psp", 0x9 }, {"PSP", 0x9 },
18836 {"msplim", 0xa }, {"MSPLIM", 0xa },
18837 {"psplim", 0xb }, {"PSPLIM", 0xb },
18838 {"primask", 0x10}, {"PRIMASK", 0x10},
18839 {"basepri", 0x11}, {"BASEPRI", 0x11},
18840 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
18841 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
18842 {"control", 0x14}, {"CONTROL", 0x14},
18843 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
18844 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
18845 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
18846 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
18847 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
18848 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
18849 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
18850 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
18851 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
18854 /* Table of all shift-in-operand names. */
18855 static const struct asm_shift_name shift_names
[] =
18857 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
18858 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
18859 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
18860 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
18861 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
18862 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
18865 /* Table of all explicit relocation names. */
18867 static struct reloc_entry reloc_names
[] =
18869 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
18870 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
18871 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
18872 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
18873 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
18874 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
18875 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
18876 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
18877 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
18878 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
18879 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
18880 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
18881 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
18882 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
18883 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
18884 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
18885 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
18886 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
}
18890 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
18891 static const struct asm_cond conds
[] =
18895 {"cs", 0x2}, {"hs", 0x2},
18896 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
18910 #define UL_BARRIER(L,U,CODE,FEAT) \
18911 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18912 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
18914 static struct asm_barrier_opt barrier_opt_names
[] =
18916 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
18917 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
18918 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
18919 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
18920 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
18921 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
18922 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
18923 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
18924 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
18925 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
18926 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
18927 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
18928 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
18929 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
18930 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
18931 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   (e.g. OPS_3).  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* One entry whose mnemonic is M1 + M2 + M3, where M2 is a (possibly
   empty) condition affix spliced into the middle of the name.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand one mnemonic into the bare form plus every condition-affixed
   form (eq, ne, ..., al).  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19113 static const struct asm_opcode insns
[] =
19115 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19116 #define THUMB_VARIANT & arm_ext_v4t
19117 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19118 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19119 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19120 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19121 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19122 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19123 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19124 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19125 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19126 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19127 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19128 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19129 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19130 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19131 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19132 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19134 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19135 for setting PSR flag bits. They are obsolete in V6 and do not
19136 have Thumb equivalents. */
19137 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19138 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19139 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
19140 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19141 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19142 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
19143 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19144 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19145 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
19147 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
19148 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
19149 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19150 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19152 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
19153 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19154 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
19156 OP_ADDRGLDR
),ldst
, t_ldst
),
19157 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19159 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19160 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19161 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19162 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19163 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19164 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19166 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19167 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19168 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
19169 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
19172 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
19173 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
19174 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
19175 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
19177 /* Thumb-compatibility pseudo ops. */
19178 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19179 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19180 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19181 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19182 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19183 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19184 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19185 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19186 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
19187 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
19188 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
19189 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
19191 /* These may simplify to neg. */
19192 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19193 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19195 #undef THUMB_VARIANT
19196 #define THUMB_VARIANT & arm_ext_v6
19198 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
19200 /* V1 instructions with no Thumb analogue prior to V6T2. */
19201 #undef THUMB_VARIANT
19202 #define THUMB_VARIANT & arm_ext_v6t2
19204 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19205 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19206 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
19208 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19209 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19210 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
19211 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19213 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19214 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19216 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19217 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19219 /* V1 instructions with no Thumb analogue at all. */
19220 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
19221 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
19223 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19224 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19225 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19226 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19227 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19228 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19229 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19230 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19233 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19234 #undef THUMB_VARIANT
19235 #define THUMB_VARIANT & arm_ext_v4t
19237 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19238 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19240 #undef THUMB_VARIANT
19241 #define THUMB_VARIANT & arm_ext_v6t2
19243 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19244 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
19246 /* Generic coprocessor instructions. */
19247 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19248 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19249 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19250 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19251 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19252 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19253 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19256 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19258 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19259 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19262 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19263 #undef THUMB_VARIANT
19264 #define THUMB_VARIANT & arm_ext_msr
19266 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
19267 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
19270 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19271 #undef THUMB_VARIANT
19272 #define THUMB_VARIANT & arm_ext_v6t2
19274 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19275 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19276 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19277 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19278 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19279 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19280 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19281 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19284 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19285 #undef THUMB_VARIANT
19286 #define THUMB_VARIANT & arm_ext_v4t
19288 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19289 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19290 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19291 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19292 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19293 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19296 #define ARM_VARIANT & arm_ext_v4t_5
19298 /* ARM Architecture 4T. */
19299 /* Note: bx (and blx) are required on V5, even if the processor does
19300 not support Thumb. */
19301 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
19304 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19305 #undef THUMB_VARIANT
19306 #define THUMB_VARIANT & arm_ext_v5t
19308 /* Note: blx has 2 variants; the .value coded here is for
19309 BLX(2). Only this variant has conditional execution. */
19310 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
19311 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
19313 #undef THUMB_VARIANT
19314 #define THUMB_VARIANT & arm_ext_v6t2
19316 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
19317 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19318 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19319 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19320 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19321 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19322 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19323 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19326 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19327 #undef THUMB_VARIANT
19328 #define THUMB_VARIANT & arm_ext_v5exp
19330 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19331 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19332 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19333 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19335 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19336 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19338 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19339 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19340 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19341 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19343 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19344 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19345 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19346 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19348 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19349 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19351 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19352 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19353 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19354 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19357 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19358 #undef THUMB_VARIANT
19359 #define THUMB_VARIANT & arm_ext_v6t2
19361 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
19362 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
19364 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
19365 ADDRGLDRS
), ldrd
, t_ldstd
),
19367 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19368 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19371 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19373 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
19376 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19377 #undef THUMB_VARIANT
19378 #define THUMB_VARIANT & arm_ext_v6
19380 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19381 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19382 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19383 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19384 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19385 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19386 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19387 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19388 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19389 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
19391 #undef THUMB_VARIANT
19392 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19394 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
19395 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19397 #undef THUMB_VARIANT
19398 #define THUMB_VARIANT & arm_ext_v6t2
19400 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19401 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19403 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
19404 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
19406 /* ARM V6 not included in V7M. */
19407 #undef THUMB_VARIANT
19408 #define THUMB_VARIANT & arm_ext_v6_notm
19409 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19410 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19411 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
19412 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
19413 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19414 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19415 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
19416 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19417 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
19418 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19419 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19420 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19421 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19422 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19423 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
19424 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
19425 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19426 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19427 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
19429 /* ARM V6 not included in V7M (eg. integer SIMD). */
19430 #undef THUMB_VARIANT
19431 #define THUMB_VARIANT & arm_ext_v6_dsp
19432 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
19433 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
19434 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19435 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19436 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19437 /* Old name for QASX. */
19438 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19439 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19440 /* Old name for QSAX. */
19441 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19442 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19443 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19444 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19445 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19446 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19447 /* Old name for SASX. */
19448 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19449 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19450 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19451 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19452 /* Old name for SHASX. */
19453 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19454 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19455 /* Old name for SHSAX. */
19456 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19457 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19458 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19459 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19460 /* Old name for SSAX. */
19461 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19462 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19463 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19464 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19465 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19466 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19467 /* Old name for UASX. */
19468 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19469 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19470 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19471 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19472 /* Old name for UHASX. */
19473 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19474 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19475 /* Old name for UHSAX. */
19476 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19477 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19478 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19479 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19480 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19481 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19482 /* Old name for UQASX. */
19483 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19484 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19485 /* Old name for UQSAX. */
19486 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19487 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19488 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19489 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19490 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19491 /* Old name for USAX. */
19492 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19493 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19494 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19495 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19496 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19497 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19498 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19499 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19500 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19501 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19502 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19503 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19504 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19505 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19506 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19507 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19508 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19509 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19510 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19511 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19512 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19513 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19514 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19515 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19516 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19517 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19518 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19519 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19520 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19521 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
19522 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
19523 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19524 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19525 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
19528 #define ARM_VARIANT & arm_ext_v6k
19529 #undef THUMB_VARIANT
19530 #define THUMB_VARIANT & arm_ext_v6k
19532 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
19533 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
19534 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
19535 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
19537 #undef THUMB_VARIANT
19538 #define THUMB_VARIANT & arm_ext_v6_notm
19539 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
19541 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
19542 RRnpcb
), strexd
, t_strexd
),
19544 #undef THUMB_VARIANT
19545 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19546 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
19548 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
19550 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19552 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19554 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
19557 #define ARM_VARIANT & arm_ext_sec
19558 #undef THUMB_VARIANT
19559 #define THUMB_VARIANT & arm_ext_sec
19561 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
19564 #define ARM_VARIANT & arm_ext_virt
19565 #undef THUMB_VARIANT
19566 #define THUMB_VARIANT & arm_ext_virt
19568 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
19569 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
19572 #define ARM_VARIANT & arm_ext_pan
19573 #undef THUMB_VARIANT
19574 #define THUMB_VARIANT & arm_ext_pan
19576 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
19579 #define ARM_VARIANT & arm_ext_v6t2
19580 #undef THUMB_VARIANT
19581 #define THUMB_VARIANT & arm_ext_v6t2
19583 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
19584 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
19585 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19586 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19588 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19589 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
19591 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19592 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19593 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19594 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19596 #undef THUMB_VARIANT
19597 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19598 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19599 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19601 /* Thumb-only instructions. */
19603 #define ARM_VARIANT NULL
19604 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
19605 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
19607 /* ARM does not really have an IT instruction, so always allow it.
19608 The opcode is copied from Thumb in order to allow warnings in
19609 -mimplicit-it=[never | arm] modes. */
19611 #define ARM_VARIANT & arm_ext_v1
19612 #undef THUMB_VARIANT
19613 #define THUMB_VARIANT & arm_ext_v6t2
19615 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
19616 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
19617 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
19618 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
19619 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
19620 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
19621 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
19622 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
19623 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
19624 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
19625 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
19626 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
19627 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
19628 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
19629 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
19630 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19631 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19632 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19634 /* Thumb2 only instructions. */
19636 #define ARM_VARIANT NULL
19638 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19639 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19640 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19641 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19642 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
19643 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
19645 /* Hardware division instructions. */
19647 #define ARM_VARIANT & arm_ext_adiv
19648 #undef THUMB_VARIANT
19649 #define THUMB_VARIANT & arm_ext_div
19651 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19652 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19654 /* ARM V6M/V7 instructions. */
19656 #define ARM_VARIANT & arm_ext_barrier
19657 #undef THUMB_VARIANT
19658 #define THUMB_VARIANT & arm_ext_barrier
19660 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
19661 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
19662 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
19664 /* ARM V7 instructions. */
19666 #define ARM_VARIANT & arm_ext_v7
19667 #undef THUMB_VARIANT
19668 #define THUMB_VARIANT & arm_ext_v7
19670 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
19671 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
19674 #define ARM_VARIANT & arm_ext_mp
19675 #undef THUMB_VARIANT
19676 #define THUMB_VARIANT & arm_ext_mp
19678 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
19680 /* AArchv8 instructions. */
19682 #define ARM_VARIANT & arm_ext_v8
19684 /* Instructions shared between armv8-a and armv8-m. */
19685 #undef THUMB_VARIANT
19686 #define THUMB_VARIANT & arm_ext_atomics
19688 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19689 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19690 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19691 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19692 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19693 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19694 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19695 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
19696 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19697 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19699 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19701 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19703 #undef THUMB_VARIANT
19704 #define THUMB_VARIANT & arm_ext_v8
19706 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
19707 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
19708 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
19710 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
19712 /* ARMv8 T32 only. */
19714 #define ARM_VARIANT NULL
19715 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
19716 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
19717 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
19719 /* FP for ARMv8. */
19721 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19722 #undef THUMB_VARIANT
19723 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19725 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19726 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19727 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19728 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19729 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19730 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19731 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
19732 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
19733 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
19734 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
19735 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
19736 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
19737 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
19738 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
19739 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
19740 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
19741 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
19743 /* Crypto v1 extensions. */
19745 #define ARM_VARIANT & fpu_crypto_ext_armv8
19746 #undef THUMB_VARIANT
19747 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19749 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
19750 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
19751 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
19752 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
19753 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
19754 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
19755 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
19756 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
19757 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
19758 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
19759 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
19760 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
19761 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
19762 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
19765 #define ARM_VARIANT & crc_ext_armv8
19766 #undef THUMB_VARIANT
19767 #define THUMB_VARIANT & crc_ext_armv8
19768 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
19769 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
19770 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
19771 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
19772 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
19773 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
19775 /* ARMv8.2 RAS extension. */
19777 #define ARM_VARIANT & arm_ext_ras
19778 #undef THUMB_VARIANT
19779 #define THUMB_VARIANT & arm_ext_ras
19780 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
19783 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19784 #undef THUMB_VARIANT
19785 #define THUMB_VARIANT NULL
19787 cCE("wfs", e200110
, 1, (RR
), rd
),
19788 cCE("rfs", e300110
, 1, (RR
), rd
),
19789 cCE("wfc", e400110
, 1, (RR
), rd
),
19790 cCE("rfc", e500110
, 1, (RR
), rd
),
19792 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19793 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19794 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19795 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19797 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19798 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19799 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19800 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19802 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
19803 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
19804 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
19805 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
19806 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
19807 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
19808 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
19809 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
19810 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
19811 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
19812 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
19813 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
19815 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
19816 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
19817 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
19818 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
19819 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
19820 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
19821 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
19822 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
19823 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
19824 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
19825 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
19826 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
19828 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
19829 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
19830 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
19831 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
19832 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
19833 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
19834 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
19835 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
19836 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
19837 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
19838 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
19839 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
19841 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
19842 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
19843 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
19844 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
19845 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
19846 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
19847 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
19848 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
19849 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
19850 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
19851 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
19852 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
19854 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
19855 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
19856 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
19857 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
19858 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
19859 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
19860 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
19861 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
19862 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
19863 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
19864 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
19865 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
19867 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
19868 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
19869 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
19870 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
19871 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
19872 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
19873 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
19874 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
19875 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
19876 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
19877 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
19878 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
19880 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
19881 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
19882 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
19883 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
19884 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
19885 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
19886 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
19887 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
19888 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
19889 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
19890 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
19891 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
19893 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
19894 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
19895 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
19896 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
19897 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
19898 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
19899 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
19900 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
19901 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
19902 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
19903 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
19904 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
19906 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
19907 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
19908 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
19909 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
19910 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
19911 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
19912 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
19913 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
19914 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
19915 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
19916 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
19917 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
19919 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
19920 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
19921 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
19922 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
19923 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
19924 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
19925 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
19926 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
19927 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
19928 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
19929 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
19930 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
19932 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
19933 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
19934 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
19935 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
19936 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
19937 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
19938 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
19939 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
19940 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
19941 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
19942 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
19943 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
19945 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
19946 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
19947 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
19948 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
19949 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
19950 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
19951 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
19952 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
19953 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
19954 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
19955 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
19956 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
19958 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
19959 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
19960 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
19961 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
19962 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
19963 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
19964 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
19965 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
19966 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
19967 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
19968 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
19969 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
19971 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
19972 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
19973 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
19974 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
19975 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
19976 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
19977 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
19978 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
19979 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
19980 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
19981 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
19982 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
19984 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
19985 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
19986 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
19987 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
19988 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
19989 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
19990 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
19991 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
19992 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
19993 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
19994 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
19995 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
19997 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
19998 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
19999 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
20000 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
20001 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
20002 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
20003 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
20004 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
20005 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
20006 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
20007 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
20008 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
20010 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20011 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20012 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20013 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20014 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20015 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20016 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20017 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20018 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20019 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20020 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20021 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20023 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20024 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20025 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20026 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20027 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20028 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20029 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20030 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20031 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20032 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20033 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20034 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20036 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20037 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20038 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20039 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20040 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20041 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20042 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20043 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20044 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20045 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20046 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20047 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20049 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20050 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20051 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20052 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20053 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20054 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20055 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20056 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20057 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20058 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20059 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20060 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20062 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20063 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20064 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20065 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20066 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20067 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20068 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20069 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20070 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20071 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20072 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20073 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20075 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20076 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20077 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20078 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20079 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20080 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20081 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20082 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20083 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20084 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20085 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20086 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20088 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20089 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20090 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20091 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20092 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20093 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20094 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20095 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20096 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20097 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20098 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20099 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20101 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20102 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20103 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20104 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20105 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20106 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20107 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20108 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20109 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20110 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20111 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20112 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20114 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20115 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20116 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20117 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20118 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20119 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20120 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20121 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20122 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20123 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20124 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20125 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20127 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20128 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20129 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20130 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20131 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20132 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20133 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20134 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20135 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20136 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20137 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20138 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20140 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20141 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20142 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20143 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20144 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20145 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20146 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20147 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20148 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20149 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20150 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20151 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20153 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20154 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20155 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20156 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20157 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20158 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20159 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20160 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20161 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20162 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20163 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20164 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20166 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20167 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20168 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20169 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20170 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20171 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20172 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20173 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20174 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20175 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20176 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20177 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20179 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20180 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20181 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20182 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20184 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
20185 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
20186 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
20187 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
20188 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
20189 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
20190 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
20191 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
20192 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
20193 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
20194 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
20195 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
20197 /* The implementation of the FIX instruction is broken on some
20198 assemblers, in that it accepts a precision specifier as well as a
20199 rounding specifier, despite the fact that this is meaningless.
20200 To be more compatible, we accept it as well, though of course it
20201 does not set any bits. */
20202 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
20203 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
20204 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
20205 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
20206 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
20207 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
20208 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
20209 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
20210 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
20211 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
20212 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
20213 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
20214 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
20216 /* Instructions that were new with the real FPA, call them V2. */
20218 #define ARM_VARIANT & fpu_fpa_ext_v2
20220 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20221 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20222 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20223 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20224 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20225 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20228 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20230 /* Moves and type conversions. */
20231 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20232 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
20233 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
20234 cCE("fmstat", ef1fa10
, 0, (), noargs
),
20235 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
20236 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
20237 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20238 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20239 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20240 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20241 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20242 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20243 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
20244 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
20246 /* Memory operations. */
20247 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20248 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20249 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20250 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20251 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20252 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20253 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20254 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20255 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20256 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20257 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20258 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20259 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20260 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20261 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20262 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20263 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20264 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20266 /* Monadic operations. */
20267 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20268 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20269 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20271 /* Dyadic operations. */
20272 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20273 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20274 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20275 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20276 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20277 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20278 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20279 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20280 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20283 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20284 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
20285 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20286 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
20288 /* Double precision load/store are still present on single precision
20289 implementations. */
20290 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20291 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20292 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20293 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20294 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20295 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20296 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20297 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20298 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20299 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20302 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20304 /* Moves and type conversions. */
20305 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20306 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20307 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20308 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20309 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20310 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20311 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20312 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20313 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20314 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20315 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20316 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20317 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20319 /* Monadic operations. */
20320 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20321 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20322 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20324 /* Dyadic operations. */
20325 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20326 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20327 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20328 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20329 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20330 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20331 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20332 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20333 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20336 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20337 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
20338 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20339 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
20342 #define ARM_VARIANT & fpu_vfp_ext_v2
20344 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
20345 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
20346 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
20347 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
20349 /* Instructions which may belong to either the Neon or VFP instruction sets.
20350 Individual encoder functions perform additional architecture checks. */
20352 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20353 #undef THUMB_VARIANT
20354 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20356 /* These mnemonics are unique to VFP. */
20357 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
20358 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
20359 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20360 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20361 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20362 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20363 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20364 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
20365 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
20366 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
20368 /* Mnemonics shared by Neon and VFP. */
20369 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
20370 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20371 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20373 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20374 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20376 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20377 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20379 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20380 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20381 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20382 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20383 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20384 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20385 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20386 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20388 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
20389 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
20390 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
20391 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
20394 /* NOTE: All VMOV encoding is special-cased! */
20395 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
20396 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
20399 #define ARM_VARIANT & arm_ext_fp16
20400 #undef THUMB_VARIANT
20401 #define THUMB_VARIANT & arm_ext_fp16
20402 /* New instructions added from v8.2, allowing the extraction and insertion of
20403 the upper 16 bits of a 32-bit vector register. */
20404 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
20405 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
20407 #undef THUMB_VARIANT
20408 #define THUMB_VARIANT & fpu_neon_ext_v1
20410 #define ARM_VARIANT & fpu_neon_ext_v1
20412 /* Data processing with three registers of the same length. */
20413 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20414 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
20415 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
20416 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20417 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20418 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20419 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20420 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20421 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20422 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20423 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20424 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20425 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20426 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20427 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20428 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20429 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20430 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20431 /* If not immediate, fall back to neon_dyadic_i64_su.
20432 shl_imm should accept I8 I16 I32 I64,
20433 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20434 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
20435 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
20436 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
20437 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
20438 /* Logic ops, types optional & ignored. */
20439 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20440 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20441 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20442 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20443 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20444 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20445 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20446 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20447 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
20448 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
20449 /* Bitfield ops, untyped. */
20450 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20451 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20452 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20453 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20454 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20455 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20456 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20457 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20458 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20459 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20460 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20461 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20462 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20463 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20464 back to neon_dyadic_if_su. */
20465 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20466 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20467 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20468 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20469 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20470 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20471 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20472 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20473 /* Comparison. Type I8 I16 I32 F32. */
20474 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
20475 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
20476 /* As above, D registers only. */
20477 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20478 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20479 /* Int and float variants, signedness unimportant. */
20480 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20481 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20482 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
20483 /* Add/sub take types I8 I16 I32 I64 F32. */
20484 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20485 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20486 /* vtst takes sizes 8, 16, 32. */
20487 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
20488 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
20489 /* VMUL takes I8 I16 I32 F32 P8. */
20490 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
20491 /* VQD{R}MULH takes S16 S32. */
20492 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20493 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20494 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20495 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20496 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20497 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20498 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20499 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20500 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20501 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20502 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20503 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20504 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20505 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20506 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20507 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20508 /* ARM v8.1 extension. */
20509 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
20510 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
20511 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
20512 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
20514 /* Two address, int/float. Types S8 S16 S32 F32. */
20515 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20516 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20518 /* Data processing with two registers and a shift amount. */
20519 /* Right shifts, and variants with rounding.
20520 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20521 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20522 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20523 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20524 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20525 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20526 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20527 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20528 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20529 /* Shift and insert. Sizes accepted 8 16 32 64. */
20530 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
20531 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
20532 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
20533 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
20534 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20535 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
20536 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
20537 /* Right shift immediate, saturating & narrowing, with rounding variants.
20538 Types accepted S16 S32 S64 U16 U32 U64. */
20539 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20540 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20541 /* As above, unsigned. Types accepted S16 S32 S64. */
20542 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20543 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20544 /* Right shift narrowing. Types accepted I16 I32 I64. */
20545 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20546 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20547 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20548 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
20549 /* CVT with optional immediate for fixed-point variant. */
20550 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
20552 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
20553 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
20555 /* Data processing, three registers of different lengths. */
20556 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20557 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
20558 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20559 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20560 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20561 /* If not scalar, fall back to neon_dyadic_long.
20562 Vector types as above, scalar types S16 S32 U16 U32. */
20563 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20564 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20565 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20566 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20567 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20568 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20569 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20570 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20571 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20572 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20573 /* Saturating doubling multiplies. Types S16 S32. */
20574 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20575 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20576 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20577 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20578 S16 S32 U16 U32. */
20579 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
20581 /* Extract. Size 8. */
20582 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
20583 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
20585 /* Two registers, miscellaneous. */
20586 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20587 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
20588 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
20589 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
20590 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
20591 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
20592 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
20593 /* Vector replicate. Sizes 8 16 32. */
20594 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
20595 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
20596 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20597 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
20598 /* VMOVN. Types I16 I32 I64. */
20599 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
20600 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20601 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
20602 /* VQMOVUN. Types S16 S32 S64. */
20603 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
20604 /* VZIP / VUZP. Sizes 8 16 32. */
20605 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20606 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20607 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20608 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20609 /* VQABS / VQNEG. Types S8 S16 S32. */
20610 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20611 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20612 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20613 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20614 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20615 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20616 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
20617 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20618 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
20619 /* Reciprocal estimates. Types U32 F16 F32. */
20620 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20621 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
20622 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20623 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
20624 /* VCLS. Types S8 S16 S32. */
20625 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
20626 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
20627 /* VCLZ. Types I8 I16 I32. */
20628 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
20629 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
20630 /* VCNT. Size 8. */
20631 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
20632 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
20633 /* Two address, untyped. */
20634 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
20635 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
20636 /* VTRN. Sizes 8 16 32. */
20637 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
20638 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
20640 /* Table lookup. Size 8. */
20641 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20642 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20644 #undef THUMB_VARIANT
20645 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20647 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20649 /* Neon element/structure load/store. */
20650 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20651 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20652 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20653 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20654 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20655 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20656 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20657 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20659 #undef THUMB_VARIANT
20660 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20662 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20663 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
20664 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20665 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20666 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20667 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20668 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20669 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20670 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20671 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20673 #undef THUMB_VARIANT
20674 #define THUMB_VARIANT & fpu_vfp_ext_v3
20676 #define ARM_VARIANT & fpu_vfp_ext_v3
20678 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
20679 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20680 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20681 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20682 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20683 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20684 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20685 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20686 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20689 #define ARM_VARIANT & fpu_vfp_ext_fma
20690 #undef THUMB_VARIANT
20691 #define THUMB_VARIANT & fpu_vfp_ext_fma
20692 /* Mnemonics shared by Neon and VFP. These are included in the
20693 VFP FMA variant; NEON and VFP FMA always includes the NEON
20694 FMA instructions. */
20695 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20696 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20697 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20698 the v form should always be used. */
20699 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20700 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20701 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20702 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20703 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20704 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20706 #undef THUMB_VARIANT
20708 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20710 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20711 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20712 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20713 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20714 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20715 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20716 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
20717 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
20720 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20722 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
20723 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
20724 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
20725 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
20726 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
20727 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
20728 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
20729 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
20730 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
20731 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20732 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20733 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20734 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20735 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20736 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20737 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20738 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20739 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20740 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
20741 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
20742 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20743 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20744 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20745 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20746 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20747 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20748 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
20749 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
20750 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
20751 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
20752 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
20753 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
20754 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
20755 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
20756 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20757 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20758 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20759 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20760 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20761 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20762 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20763 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20764 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20765 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20766 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20767 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20768 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
20769 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20770 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20771 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20772 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20773 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20774 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20775 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20776 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20777 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20778 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20779 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20780 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20781 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20782 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20783 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20784 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20785 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20786 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20787 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20788 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20789 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20790 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20791 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20792 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20793 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20794 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20795 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20796 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20797 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20798 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20799 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20800 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20801 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20802 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20803 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20804 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20805 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20806 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20807 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20808 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20809 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20810 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
20811 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20812 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20813 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20814 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20815 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20816 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20817 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20818 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20819 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20820 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20821 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20822 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20823 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20824 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20825 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20826 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20827 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20828 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20829 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20830 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20831 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20832 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
20833 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20834 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20835 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20836 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20837 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20838 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20839 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20840 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20841 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20842 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20843 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20844 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20845 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20846 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20847 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20848 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20849 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20850 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20851 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20852 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20853 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20854 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20855 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20856 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20857 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20858 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20859 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20860 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20861 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20862 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20863 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20864 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20865 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20866 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20867 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20868 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20869 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20870 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20871 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20872 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20873 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20874 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20875 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20876 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20877 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20878 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20879 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20880 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20881 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20882 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20883 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
20886 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20888 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
20889 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
20890 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
20891 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20892 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20893 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20894 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20895 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20896 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20897 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20898 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20899 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20900 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20901 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20902 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20903 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20904 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20905 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20906 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20907 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20908 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
20909 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20910 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20911 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20912 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20913 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20914 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20915 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20916 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20917 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20918 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20919 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20920 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20921 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20922 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20923 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20924 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20925 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20926 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20927 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20928 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20929 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20930 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20931 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20932 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20933 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20934 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20935 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20936 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20937 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20938 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20939 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20940 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20941 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20942 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20943 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20944 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20947 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20949 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20950 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20951 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20952 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20953 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20954 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20955 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20956 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20957 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
20958 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
20959 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
20960 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
20961 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
20962 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
20963 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
20964 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
20965 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
20966 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
20967 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
20968 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
20969 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
20970 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
20971 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
20972 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
20973 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
20974 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
20975 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
20976 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
20977 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
20978 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
20979 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
20980 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
20981 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
20982 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
20983 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
20984 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
20985 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
20986 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
20987 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
20988 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
20989 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
20990 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
20991 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
20992 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
20993 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
20994 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
20995 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
20996 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
20997 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
20998 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
20999 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
21000 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
21001 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
21002 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
21003 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21004 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21005 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21006 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21007 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21008 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21009 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
21010 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
21011 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
21012 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
21013 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21014 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21015 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21016 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21017 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21018 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21019 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21020 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21021 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
21022 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
21023 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
21024 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
21026 /* ARMv8-M instructions. */
21028 #define ARM_VARIANT NULL
21029 #undef THUMB_VARIANT
21030 #define THUMB_VARIANT & arm_ext_v8m
21031 TUE("sg", 0, e97fe97f
, 0, (), 0, noargs
),
21032 TUE("blxns", 0, 4784, 1, (RRnpc
), 0, t_blx
),
21033 TUE("bxns", 0, 4704, 1, (RRnpc
), 0, t_bx
),
21034 TUE("tt", 0, e840f000
, 2, (RRnpc
, RRnpc
), 0, tt
),
21035 TUE("ttt", 0, e840f040
, 2, (RRnpc
, RRnpc
), 0, tt
),
21036 TUE("tta", 0, e840f080
, 2, (RRnpc
, RRnpc
), 0, tt
),
21037 TUE("ttat", 0, e840f0c0
, 2, (RRnpc
, RRnpc
), 0, tt
),
21039 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21040 instructions behave as nop if no VFP is present. */
21041 #undef THUMB_VARIANT
21042 #define THUMB_VARIANT & arm_ext_v8m_main
21043 TUEc("vlldm", 0, ec300a00
, 1, (RRnpc
), rn
),
21044 TUEc("vlstm", 0, ec200a00
, 1, (RRnpc
), rn
),
21047 #undef THUMB_VARIANT
21073 /* MD interface: bits in the object file. */
21075 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21076 for use in the a.out file, and stores them in the array pointed to by buf.
21077 This knows about the endian-ness of the target machine and does
21078 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21079 2 (short) and 4 (long) Floating numbers are put out as a series of
21080 LITTLENUMS (shorts, here at least). */
21083 md_number_to_chars (char * buf
, valueT val
, int n
)
21085 if (target_big_endian
)
21086 number_to_chars_bigendian (buf
, val
, n
);
21088 number_to_chars_littleendian (buf
, val
, n
);
21092 md_chars_to_number (char * buf
, int n
)
21095 unsigned char * where
= (unsigned char *) buf
;
21097 if (target_big_endian
)
21102 result
|= (*where
++ & 255);
21110 result
|= (where
[n
] & 255);
21117 /* MD interface: Sections. */
21119 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21120 that an rs_machine_dependent frag may reach. */
21123 arm_frag_max_var (fragS
*fragp
)
21125 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21126 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21128 Note that we generate relaxable instructions even for cases that don't
21129 really need it, like an immediate that's a trivial constant. So we're
21130 overestimating the instruction size for some of those cases. Rather
21131 than putting more intelligence here, it would probably be better to
21132 avoid generating a relaxation frag in the first place when it can be
21133 determined up front that a short instruction will suffice. */
21135 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
21139 /* Estimate the size of a frag before relaxing. Assume everything fits in
21143 md_estimate_size_before_relax (fragS
* fragp
,
21144 segT segtype ATTRIBUTE_UNUSED
)
21150 /* Convert a machine dependent frag. */
21153 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
21155 unsigned long insn
;
21156 unsigned long old_op
;
21164 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21166 old_op
= bfd_get_16(abfd
, buf
);
21167 if (fragp
->fr_symbol
)
21169 exp
.X_op
= O_symbol
;
21170 exp
.X_add_symbol
= fragp
->fr_symbol
;
21174 exp
.X_op
= O_constant
;
21176 exp
.X_add_number
= fragp
->fr_offset
;
21177 opcode
= fragp
->fr_subtype
;
21180 case T_MNEM_ldr_pc
:
21181 case T_MNEM_ldr_pc2
:
21182 case T_MNEM_ldr_sp
:
21183 case T_MNEM_str_sp
:
21190 if (fragp
->fr_var
== 4)
21192 insn
= THUMB_OP32 (opcode
);
21193 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
21195 insn
|= (old_op
& 0x700) << 4;
21199 insn
|= (old_op
& 7) << 12;
21200 insn
|= (old_op
& 0x38) << 13;
21202 insn
|= 0x00000c00;
21203 put_thumb32_insn (buf
, insn
);
21204 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
21208 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
21210 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
21213 if (fragp
->fr_var
== 4)
21215 insn
= THUMB_OP32 (opcode
);
21216 insn
|= (old_op
& 0xf0) << 4;
21217 put_thumb32_insn (buf
, insn
);
21218 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
21222 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21223 exp
.X_add_number
-= 4;
21231 if (fragp
->fr_var
== 4)
21233 int r0off
= (opcode
== T_MNEM_mov
21234 || opcode
== T_MNEM_movs
) ? 0 : 8;
21235 insn
= THUMB_OP32 (opcode
);
21236 insn
= (insn
& 0xe1ffffff) | 0x10000000;
21237 insn
|= (old_op
& 0x700) << r0off
;
21238 put_thumb32_insn (buf
, insn
);
21239 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21243 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
21248 if (fragp
->fr_var
== 4)
21250 insn
= THUMB_OP32(opcode
);
21251 put_thumb32_insn (buf
, insn
);
21252 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
21255 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
21259 if (fragp
->fr_var
== 4)
21261 insn
= THUMB_OP32(opcode
);
21262 insn
|= (old_op
& 0xf00) << 14;
21263 put_thumb32_insn (buf
, insn
);
21264 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
21267 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
21270 case T_MNEM_add_sp
:
21271 case T_MNEM_add_pc
:
21272 case T_MNEM_inc_sp
:
21273 case T_MNEM_dec_sp
:
21274 if (fragp
->fr_var
== 4)
21276 /* ??? Choose between add and addw. */
21277 insn
= THUMB_OP32 (opcode
);
21278 insn
|= (old_op
& 0xf0) << 4;
21279 put_thumb32_insn (buf
, insn
);
21280 if (opcode
== T_MNEM_add_pc
)
21281 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
21283 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21286 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21294 if (fragp
->fr_var
== 4)
21296 insn
= THUMB_OP32 (opcode
);
21297 insn
|= (old_op
& 0xf0) << 4;
21298 insn
|= (old_op
& 0xf) << 16;
21299 put_thumb32_insn (buf
, insn
);
21300 if (insn
& (1 << 20))
21301 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21303 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21306 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21312 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
21313 (enum bfd_reloc_code_real
) reloc_type
);
21314 fixp
->fx_file
= fragp
->fr_file
;
21315 fixp
->fx_line
= fragp
->fr_line
;
21316 fragp
->fr_fix
+= fragp
->fr_var
;
21318 /* Set whether we use thumb-2 ISA based on final relaxation results. */
21319 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
21320 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
21321 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
21324 /* Return the size of a relaxable immediate operand instruction.
21325 SHIFT and SIZE specify the form of the allowable immediate. */
21327 relax_immediate (fragS
*fragp
, int size
, int shift
)
21333 /* ??? Should be able to do better than this. */
21334 if (fragp
->fr_symbol
)
21337 low
= (1 << shift
) - 1;
21338 mask
= (1 << (shift
+ size
)) - (1 << shift
);
21339 offset
= fragp
->fr_offset
;
21340 /* Force misaligned offsets to 32-bit variant. */
21343 if (offset
& ~mask
)
21348 /* Get the address of a symbol during relaxation. */
21350 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
21356 sym
= fragp
->fr_symbol
;
21357 sym_frag
= symbol_get_frag (sym
);
21358 know (S_GET_SEGMENT (sym
) != absolute_section
21359 || sym_frag
== &zero_address_frag
);
21360 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
21362 /* If frag has yet to be reached on this pass, assume it will
21363 move by STRETCH just as we did. If this is not so, it will
21364 be because some frag between grows, and that will force
21368 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
21372 /* Adjust stretch for any alignment frag. Note that if have
21373 been expanding the earlier code, the symbol may be
21374 defined in what appears to be an earlier frag. FIXME:
21375 This doesn't handle the fr_subtype field, which specifies
21376 a maximum number of bytes to skip when doing an
21378 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
21380 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
21383 stretch
= - ((- stretch
)
21384 & ~ ((1 << (int) f
->fr_offset
) - 1));
21386 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
21398 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21401 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
21406 /* Assume worst case for symbols not known to be in the same section. */
21407 if (fragp
->fr_symbol
== NULL
21408 || !S_IS_DEFINED (fragp
->fr_symbol
)
21409 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21410 || S_IS_WEAK (fragp
->fr_symbol
))
21413 val
= relaxed_symbol_addr (fragp
, stretch
);
21414 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
21415 addr
= (addr
+ 4) & ~3;
21416 /* Force misaligned targets to 32-bit variant. */
21420 if (val
< 0 || val
> 1020)
21425 /* Return the size of a relaxable add/sub immediate instruction. */
21427 relax_addsub (fragS
*fragp
, asection
*sec
)
21432 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21433 op
= bfd_get_16(sec
->owner
, buf
);
21434 if ((op
& 0xf) == ((op
>> 4) & 0xf))
21435 return relax_immediate (fragp
, 8, 0);
21437 return relax_immediate (fragp
, 3, 0);
21440 /* Return TRUE iff the definition of symbol S could be pre-empted
21441 (overridden) at link or load time. */
21443 symbol_preemptible (symbolS
*s
)
21445 /* Weak symbols can always be pre-empted. */
21449 /* Non-global symbols cannot be pre-empted. */
21450 if (! S_IS_EXTERNAL (s
))
21454 /* In ELF, a global symbol can be marked protected, or private. In that
21455 case it can't be pre-empted (other definitions in the same link unit
21456 would violate the ODR). */
21457 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
21461 /* Other global symbols might be pre-empted. */
21465 /* Return the size of a relaxable branch instruction. BITS is the
21466 size of the offset field in the narrow instruction. */
21469 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
21475 /* Assume worst case for symbols not known to be in the same section. */
21476 if (!S_IS_DEFINED (fragp
->fr_symbol
)
21477 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21478 || S_IS_WEAK (fragp
->fr_symbol
))
21482 /* A branch to a function in ARM state will require interworking. */
21483 if (S_IS_DEFINED (fragp
->fr_symbol
)
21484 && ARM_IS_FUNC (fragp
->fr_symbol
))
21488 if (symbol_preemptible (fragp
->fr_symbol
))
21491 val
= relaxed_symbol_addr (fragp
, stretch
);
21492 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
21495 /* Offset is a signed value *2 */
21497 if (val
>= limit
|| val
< -limit
)
21503 /* Relax a machine dependent frag. This returns the amount by which
21504 the current size of the frag should change. */
21507 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
21512 oldsize
= fragp
->fr_var
;
21513 switch (fragp
->fr_subtype
)
21515 case T_MNEM_ldr_pc2
:
21516 newsize
= relax_adr (fragp
, sec
, stretch
);
21518 case T_MNEM_ldr_pc
:
21519 case T_MNEM_ldr_sp
:
21520 case T_MNEM_str_sp
:
21521 newsize
= relax_immediate (fragp
, 8, 2);
21525 newsize
= relax_immediate (fragp
, 5, 2);
21529 newsize
= relax_immediate (fragp
, 5, 1);
21533 newsize
= relax_immediate (fragp
, 5, 0);
21536 newsize
= relax_adr (fragp
, sec
, stretch
);
21542 newsize
= relax_immediate (fragp
, 8, 0);
21545 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
21548 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
21550 case T_MNEM_add_sp
:
21551 case T_MNEM_add_pc
:
21552 newsize
= relax_immediate (fragp
, 8, 2);
21554 case T_MNEM_inc_sp
:
21555 case T_MNEM_dec_sp
:
21556 newsize
= relax_immediate (fragp
, 7, 2);
21562 newsize
= relax_addsub (fragp
, sec
);
21568 fragp
->fr_var
= newsize
;
21569 /* Freeze wide instructions that are at or before the same location as
21570 in the previous pass. This avoids infinite loops.
21571 Don't freeze them unconditionally because targets may be artificially
21572 misaligned by the expansion of preceding frags. */
21573 if (stretch
<= 0 && newsize
> 2)
21575 md_convert_frag (sec
->owner
, sec
, fragp
);
21579 return newsize
- oldsize
;
21582 /* Round up a section size to the appropriate boundary. */
21585 md_section_align (segT segment ATTRIBUTE_UNUSED
,
21588 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21589 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
21591 /* For a.out, force the section size to be aligned. If we don't do
21592 this, BFD will align it for us, but it will not write out the
21593 final bytes of the section. This may be a bug in BFD, but it is
21594 easier to fix it here since that is how the other a.out targets
21598 align
= bfd_get_section_alignment (stdoutput
, segment
);
21599 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
21606 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21607 of an rs_align_code fragment. */
21610 arm_handle_align (fragS
* fragP
)
21612 static unsigned char const arm_noop
[2][2][4] =
21615 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
21616 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
21619 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
21620 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
21623 static unsigned char const thumb_noop
[2][2][2] =
21626 {0xc0, 0x46}, /* LE */
21627 {0x46, 0xc0}, /* BE */
21630 {0x00, 0xbf}, /* LE */
21631 {0xbf, 0x00} /* BE */
21634 static unsigned char const wide_thumb_noop
[2][4] =
21635 { /* Wide Thumb-2 */
21636 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
21637 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
21640 unsigned bytes
, fix
, noop_size
;
21642 const unsigned char * noop
;
21643 const unsigned char *narrow_noop
= NULL
;
21648 if (fragP
->fr_type
!= rs_align_code
)
21651 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
21652 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
21655 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21656 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
21658 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
21660 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
21662 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21663 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
21665 narrow_noop
= thumb_noop
[1][target_big_endian
];
21666 noop
= wide_thumb_noop
[target_big_endian
];
21669 noop
= thumb_noop
[0][target_big_endian
];
21677 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21678 ? selected_cpu
: arm_arch_none
,
21680 [target_big_endian
];
21687 fragP
->fr_var
= noop_size
;
21689 if (bytes
& (noop_size
- 1))
21691 fix
= bytes
& (noop_size
- 1);
21693 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
21695 memset (p
, 0, fix
);
21702 if (bytes
& noop_size
)
21704 /* Insert a narrow noop. */
21705 memcpy (p
, narrow_noop
, noop_size
);
21707 bytes
-= noop_size
;
21711 /* Use wide noops for the remainder */
21715 while (bytes
>= noop_size
)
21717 memcpy (p
, noop
, noop_size
);
21719 bytes
-= noop_size
;
21723 fragP
->fr_fix
+= fix
;
21726 /* Called from md_do_align. Used to create an alignment
21727 frag in a code section. */
21730 arm_frag_align_code (int n
, int max
)
21734 /* We assume that there will never be a requirement
21735 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21736 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21741 _("alignments greater than %d bytes not supported in .text sections."),
21742 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
21743 as_fatal ("%s", err_msg
);
21746 p
= frag_var (rs_align_code
,
21747 MAX_MEM_FOR_RS_ALIGN_CODE
,
21749 (relax_substateT
) max
,
21756 /* Perform target specific initialisation of a frag.
21757 Note - despite the name this initialisation is not done when the frag
21758 is created, but only when its type is assigned. A frag can be created
21759 and used a long time before its type is set, so beware of assuming that
21760 this initialisation is performed first. */
21764 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
21766 /* Record whether this frag is in an ARM or a THUMB area. */
21767 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21770 #else /* OBJ_ELF is defined. */
21772 arm_init_frag (fragS
* fragP
, int max_chars
)
21774 int frag_thumb_mode
;
21776 /* If the current ARM vs THUMB mode has not already
21777 been recorded into this frag then do so now. */
21778 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
21779 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21781 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
21783 /* Record a mapping symbol for alignment frags. We will delete this
21784 later if the alignment ends up empty. */
21785 switch (fragP
->fr_type
)
21788 case rs_align_test
:
21790 mapping_state_2 (MAP_DATA
, max_chars
);
21792 case rs_align_code
:
21793 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
21800 /* When we change sections we need to issue a new mapping symbol. */
21803 arm_elf_change_section (void)
21805 /* Link an unlinked unwind index table section to the .text section. */
21806 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
21807 && elf_linked_to_section (now_seg
) == NULL
)
21808 elf_linked_to_section (now_seg
) = text_section
;
/* Map a section-name suffix from a .section directive to an ELF section
   type: the 5-character name "exidx" selects SHT_ARM_EXIDX (the ARM
   exception-index table type).  NOTE(review): the fall-through return
   value for non-matching names is not visible in this chunk — confirm
   against the full source.  */
21812 arm_elf_section_type (const char * str
, size_t len
)
21814 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
21815 return SHT_ARM_EXIDX
;
21820 /* Code to deal with unwinding tables. */
21822 static void add_unwind_adjustsp (offsetT
);
21824 /* Generate any deferred unwind frame offset. */
/* Reads the accumulated unwind.pending_offset, clears it, and emits the
   corresponding stack-adjustment unwind opcode(s) via
   add_unwind_adjustsp.  NOTE(review): a guard that skips emission when
   the pending offset is zero is not visible in this chunk — confirm
   against the full source.  */
21827 flush_pending_unwind (void)
21831 offset
= unwind
.pending_offset
;
21832 unwind
.pending_offset
= 0;
21834 add_unwind_adjustsp (offset
);
21837 /* Add an opcode to this list for this function. Two-byte opcodes should
21838 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21842 add_unwind_opcode (valueT op
, int length
)
21844 /* Add any deferred stack adjustment. */
21845 if (unwind
.pending_offset
)
21846 flush_pending_unwind ();
21848 unwind
.sp_restored
= 0;
21850 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
21852 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
21853 if (unwind
.opcodes
)
21854 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
21855 unwind
.opcode_alloc
);
21857 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
21862 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
21864 unwind
.opcode_count
++;
21868 /* Add unwind opcodes to adjust the stack pointer. */
21871 add_unwind_adjustsp (offsetT offset
)
21875 if (offset
> 0x200)
21877 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
21882 /* Long form: 0xb2, uleb128. */
21883 /* This might not fit in a word so add the individual bytes,
21884 remembering the list is built in reverse order. */
21885 o
= (valueT
) ((offset
- 0x204) >> 2);
21887 add_unwind_opcode (0, 1);
21889 /* Calculate the uleb128 encoding of the offset. */
21893 bytes
[n
] = o
& 0x7f;
21899 /* Add the insn. */
21901 add_unwind_opcode (bytes
[n
- 1], 1);
21902 add_unwind_opcode (0xb2, 1);
21904 else if (offset
> 0x100)
21906 /* Two short opcodes. */
21907 add_unwind_opcode (0x3f, 1);
21908 op
= (offset
- 0x104) >> 2;
21909 add_unwind_opcode (op
, 1);
21911 else if (offset
> 0)
21913 /* Short opcode. */
21914 op
= (offset
- 4) >> 2;
21915 add_unwind_opcode (op
, 1);
21917 else if (offset
< 0)
21920 while (offset
> 0x100)
21922 add_unwind_opcode (0x7f, 1);
21925 op
= ((offset
- 4) >> 2) | 0x40;
21926 add_unwind_opcode (op
, 1);
21930 /* Finish the list of unwind opcodes for this function. */
21932 finish_unwind_opcodes (void)
21936 if (unwind
.fp_used
)
21938 /* Adjust sp as necessary. */
21939 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
21940 flush_pending_unwind ();
21942 /* After restoring sp from the frame pointer. */
21943 op
= 0x90 | unwind
.fp_reg
;
21944 add_unwind_opcode (op
, 1);
21947 flush_pending_unwind ();
21951 /* Start an exception table entry. If idx is nonzero this is an index table
21955 start_unwind_section (const segT text_seg
, int idx
)
21957 const char * text_name
;
21958 const char * prefix
;
21959 const char * prefix_once
;
21960 const char * group_name
;
21968 prefix
= ELF_STRING_ARM_unwind
;
21969 prefix_once
= ELF_STRING_ARM_unwind_once
;
21970 type
= SHT_ARM_EXIDX
;
21974 prefix
= ELF_STRING_ARM_unwind_info
;
21975 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
21976 type
= SHT_PROGBITS
;
21979 text_name
= segment_name (text_seg
);
21980 if (streq (text_name
, ".text"))
21983 if (strncmp (text_name
, ".gnu.linkonce.t.",
21984 strlen (".gnu.linkonce.t.")) == 0)
21986 prefix
= prefix_once
;
21987 text_name
+= strlen (".gnu.linkonce.t.");
21990 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
21996 /* Handle COMDAT group. */
21997 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
21999 group_name
= elf_group_name (text_seg
);
22000 if (group_name
== NULL
)
22002 as_bad (_("Group section `%s' has no group signature"),
22003 segment_name (text_seg
));
22004 ignore_rest_of_line ();
22007 flags
|= SHF_GROUP
;
22011 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
22013 /* Set the section link for index tables. */
22015 elf_linked_to_section (now_seg
) = text_seg
;
22019 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22020 personality routine data. Returns zero, or the index table value for
22021 an inline entry. */
22024 create_unwind_entry (int have_data
)
22029 /* The current word of data. */
22031 /* The number of bytes left in this word. */
22034 finish_unwind_opcodes ();
22036 /* Remember the current text section. */
22037 unwind
.saved_seg
= now_seg
;
22038 unwind
.saved_subseg
= now_subseg
;
22040 start_unwind_section (now_seg
, 0);
22042 if (unwind
.personality_routine
== NULL
)
22044 if (unwind
.personality_index
== -2)
22047 as_bad (_("handlerdata in cantunwind frame"));
22048 return 1; /* EXIDX_CANTUNWIND. */
22051 /* Use a default personality routine if none is specified. */
22052 if (unwind
.personality_index
== -1)
22054 if (unwind
.opcode_count
> 3)
22055 unwind
.personality_index
= 1;
22057 unwind
.personality_index
= 0;
22060 /* Space for the personality routine entry. */
22061 if (unwind
.personality_index
== 0)
22063 if (unwind
.opcode_count
> 3)
22064 as_bad (_("too many unwind opcodes for personality routine 0"));
22068 /* All the data is inline in the index table. */
22071 while (unwind
.opcode_count
> 0)
22073 unwind
.opcode_count
--;
22074 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
22078 /* Pad with "finish" opcodes. */
22080 data
= (data
<< 8) | 0xb0;
22087 /* We get two opcodes "free" in the first word. */
22088 size
= unwind
.opcode_count
- 2;
22092 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
22093 if (unwind
.personality_index
!= -1)
22095 as_bad (_("attempt to recreate an unwind entry"));
22099 /* An extra byte is required for the opcode count. */
22100 size
= unwind
.opcode_count
+ 1;
22103 size
= (size
+ 3) >> 2;
22105 as_bad (_("too many unwind opcodes"));
22107 frag_align (2, 0, 0);
22108 record_alignment (now_seg
, 2);
22109 unwind
.table_entry
= expr_build_dot ();
22111 /* Allocate the table entry. */
22112 ptr
= frag_more ((size
<< 2) + 4);
22113 /* PR 13449: Zero the table entries in case some of them are not used. */
22114 memset (ptr
, 0, (size
<< 2) + 4);
22115 where
= frag_now_fix () - ((size
<< 2) + 4);
22117 switch (unwind
.personality_index
)
22120 /* ??? Should this be a PLT generating relocation? */
22121 /* Custom personality routine. */
22122 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
22123 BFD_RELOC_ARM_PREL31
);
22128 /* Set the first byte to the number of additional words. */
22129 data
= size
> 0 ? size
- 1 : 0;
22133 /* ABI defined personality routines. */
22135 /* Three opcodes bytes are packed into the first word. */
22142 /* The size and first two opcode bytes go in the first word. */
22143 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
22148 /* Should never happen. */
22152 /* Pack the opcodes into words (MSB first), reversing the list at the same
22154 while (unwind
.opcode_count
> 0)
22158 md_number_to_chars (ptr
, data
, 4);
22163 unwind
.opcode_count
--;
22165 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
22168 /* Finish off the last word. */
22171 /* Pad with "finish" opcodes. */
22173 data
= (data
<< 8) | 0xb0;
22175 md_number_to_chars (ptr
, data
, 4);
22180 /* Add an empty descriptor if there is no user-specified data. */
22181 ptr
= frag_more (4);
22182 md_number_to_chars (ptr
, 0, 4);
22189 /* Initialize the DWARF-2 unwind information for this procedure. */
22192 tc_arm_frame_initial_instructions (void)
22194 cfi_add_CFA_def_cfa (REG_SP
, 0);
22196 #endif /* OBJ_ELF */
22198 /* Convert REGNAME to a DWARF-2 register number. */
22201 tc_arm_regname_to_dw2regnum (char *regname
)
22203 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
22207 /* PR 16694: Allow VFP registers as well. */
22208 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
22212 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
22221 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
22225 exp
.X_op
= O_secrel
;
22226 exp
.X_add_symbol
= symbol
;
22227 exp
.X_add_number
= 0;
22228 emit_expr (&exp
, size
);
22232 /* MD interface: Symbol and relocation handling. */
22234 /* Return the address within the segment that a PC-relative fixup is
22235 relative to. For ARM, PC-relative fixups applied to instructions
22236 are generally relative to the location of the fixup plus 8 bytes.
22237 Thumb branches are offset by 4, and Thumb loads relative to PC
22238 require special handling. */
22241 md_pcrel_from_section (fixS
* fixP
, segT seg
)
22243 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22245 /* If this is pc-relative and we are going to emit a relocation
22246 then we just want to put out any pipeline compensation that the linker
22247 will need. Otherwise we want to use the calculated base.
22248 For WinCE we skip the bias for externals as well, since this
22249 is how the MS ARM-CE assembler behaves and we want to be compatible. */
22251 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22252 || (arm_force_relocation (fixP
)
22254 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
22260 switch (fixP
->fx_r_type
)
22262 /* PC relative addressing on the Thumb is slightly odd as the
22263 bottom two bits of the PC are forced to zero for the
22264 calculation. This happens *after* application of the
22265 pipeline offset. However, Thumb adrl already adjusts for
22266 this, so we need not do it again. */
22267 case BFD_RELOC_ARM_THUMB_ADD
:
22270 case BFD_RELOC_ARM_THUMB_OFFSET
:
22271 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22272 case BFD_RELOC_ARM_T32_ADD_PC12
:
22273 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
22274 return (base
+ 4) & ~3;
22276 /* Thumb branches are simply offset by +4. */
22277 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
22278 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
22279 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
22280 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
22281 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
22284 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
22286 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22287 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22288 && ARM_IS_FUNC (fixP
->fx_addsy
)
22289 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22290 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22293 /* BLX is like branches above, but forces the low two bits of PC to
22295 case BFD_RELOC_THUMB_PCREL_BLX
:
22297 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22298 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22299 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22300 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22301 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22302 return (base
+ 4) & ~3;
22304 /* ARM mode branches are offset by +8. However, the Windows CE
22305 loader expects the relocation not to take this into account. */
22306 case BFD_RELOC_ARM_PCREL_BLX
:
22308 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22309 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22310 && ARM_IS_FUNC (fixP
->fx_addsy
)
22311 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22312 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22315 case BFD_RELOC_ARM_PCREL_CALL
:
22317 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22318 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22319 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22320 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22321 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22324 case BFD_RELOC_ARM_PCREL_BRANCH
:
22325 case BFD_RELOC_ARM_PCREL_JUMP
:
22326 case BFD_RELOC_ARM_PLT32
:
22328 /* When handling fixups immediately, because we have already
22329 discovered the value of a symbol, or the address of the frag involved
22330 we must account for the offset by +8, as the OS loader will never see the reloc.
22331 see fixup_segment() in write.c
22332 The S_IS_EXTERNAL test handles the case of global symbols.
22333 Those need the calculated base, not just the pipe compensation the linker will need. */
22335 && fixP
->fx_addsy
!= NULL
22336 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22337 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
22345 /* ARM mode loads relative to PC are also offset by +8. Unlike
22346 branches, the Windows CE loader *does* expect the relocation
22347 to take this into account. */
22348 case BFD_RELOC_ARM_OFFSET_IMM
:
22349 case BFD_RELOC_ARM_OFFSET_IMM8
:
22350 case BFD_RELOC_ARM_HWLITERAL
:
22351 case BFD_RELOC_ARM_LITERAL
:
22352 case BFD_RELOC_ARM_CP_OFF_IMM
:
22356 /* Other PC-relative relocations are un-offset. */
22362 static bfd_boolean flag_warn_syms
= TRUE
;
22365 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
22367 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22368 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22369 does mean that the resulting code might be very confusing to the reader.
22370 Also this warning can be triggered if the user omits an operand before
22371 an immediate address, eg:
22375 GAS treats this as an assignment of the value of the symbol foo to a
22376 symbol LDR, and so (without this code) it will not issue any kind of
22377 warning or error message.
22379 Note - ARM instructions are case-insensitive but the strings in the hash
22380 table are all stored in lower case, so we must first ensure that name is
22382 if (flag_warn_syms
&& arm_ops_hsh
)
22384 char * nbuf
= strdup (name
);
22387 for (p
= nbuf
; *p
; p
++)
22389 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
22391 static struct hash_control
* already_warned
= NULL
;
22393 if (already_warned
== NULL
)
22394 already_warned
= hash_new ();
22395 /* Only warn about the symbol once. To keep the code
22396 simple we let hash_insert do the lookup for us. */
22397 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
22398 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.
   Returns the (possibly newly created) GOT symbol, or NULL when the
   name is not one we supply a default for.  */

symbolS *
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character prefix test before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create the GOT symbol lazily in the undefined section with
	     value 0; the linker assigns its real value.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, & zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return NULL;
}
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.  On success returns the encoding of the low part
   (8-bit value plus rotation in bits 7..11) and stores the high
   part's encoding through *HIGHPART; returns FAIL otherwise.  */

static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* ARM immediates rotate by multiples of two, so try each even
     rotation until the low byte of the rotated value is non-zero.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* More than 16 significant bits: no two-byte split here.  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* Only the top byte can be left at this point.  */
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Low byte plus its rotation amount, ready for the insn field.  */
	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
22473 validate_offset_imm (unsigned int val
, int hwse
)
22475 if ((hwse
&& val
> 255) || val
> 4095)
/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
	ADD <-> SUB
	CMP <-> CMN
   by negating the second operand, and
	MOV <-> MVN
	AND <-> BIC
	ADC <-> SBC
   by inverting the second operand.
   On success rewrites *INSTRUCTION in place with the flipped opcode
   and returns the new encoded immediate; returns FAIL when neither
   transformation yields an encodable constant.  */

static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Pre-compute both candidate immediates; which one we use depends
     on the opcode being flipped.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		 /* ADD <-> SUB */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		 /* ADC <-> SBC */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The chosen immediate may itself be unencodable.  */
  if (value == (unsigned) FAIL)
    return FAIL;

  /* Patch the opcode field, preserving everything else.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
/* Like negate_data_op, but for Thumb-2.  Rewrites *INSTRUCTION with
   the complementary opcode and returns the new encoded immediate, or
   FAIL when no equivalent transformation exists.  */

static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Candidate immediates for the negating and inverting flips.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == PC means this is TST, which cannot be inverted.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC.  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The substitute immediate may itself be unencodable.  */
  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Patch the opcode field, preserving everything else.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
22645 /* Read a 32-bit thumb instruction from buf. */
22646 static unsigned long
22647 get_thumb32_insn (char * buf
)
22649 unsigned long insn
;
22650 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
22651 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
/* We usually want to set the low bit on the address of thumb function
   symbols.  In particular .word foo - . should have the low bit set.
   Generic code tries to fold the difference of two symbols to
   a constant.  Prevent this and force a relocation when the first symbols
   is a thumb function.  Returns TRUE when the expression was handled
   here, FALSE to let the generic optimizer proceed.  */

bfd_boolean
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
{
  if (op == O_subtract
      && l->X_op == O_symbol
      && r->X_op == O_symbol
      && THUMB_IS_FUNC (l->X_add_symbol))
    {
      /* Rewrite L as "symbol - symbol" so the difference stays
	 symbolic and a relocation is emitted instead of a constant.  */
      l->X_op = O_subtract;
      l->X_op_symbol = r->X_add_symbol;
      l->X_add_number -= r->X_add_number;
      return TRUE;
    }

  /* Process as normal.  */
  return FALSE;
}
/* Encode Thumb2 unconditional branches and calls. The encoding
   for the 2 are identical for the immediate values.  Writes the
   offset VALUE into the two halfwords of the instruction at BUF.  */

static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* J1 and J2 bit positions in the second halfword.  */
#define T2I1I2MASK  ((1 << 13) | (1 << 11))

  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the branch offset into the T2 B/BL immediate fields:
     sign bit S, I1/I2, a 10-bit high part and an 11-bit low part
     (bit 0 of the offset is discarded - Thumb addresses are
     halfword aligned).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The architecture stores J1 = I1 EOR (NOT S) and J2 = I2 EOR (NOT S);
     the final XOR with the mask supplies the NOT.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
22707 md_apply_fix (fixS
* fixP
,
22711 offsetT value
= * valP
;
22713 unsigned int newimm
;
22714 unsigned long temp
;
22716 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
22718 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
22720 /* Note whether this will delete the relocation. */
22722 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
22725 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22726 consistency with the behaviour on 32-bit hosts. Remember value
22728 value
&= 0xffffffff;
22729 value
^= 0x80000000;
22730 value
-= 0x80000000;
22733 fixP
->fx_addnumber
= value
;
22735 /* Same treatment for fixP->fx_offset. */
22736 fixP
->fx_offset
&= 0xffffffff;
22737 fixP
->fx_offset
^= 0x80000000;
22738 fixP
->fx_offset
-= 0x80000000;
22740 switch (fixP
->fx_r_type
)
22742 case BFD_RELOC_NONE
:
22743 /* This will need to go in the object file. */
22747 case BFD_RELOC_ARM_IMMEDIATE
:
22748 /* We claim that this fixup has been processed here,
22749 even if in fact we generate an error because we do
22750 not have a reloc for it, so tc_gen_reloc will reject it. */
22753 if (fixP
->fx_addsy
)
22755 const char *msg
= 0;
22757 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22758 msg
= _("undefined symbol %s used as an immediate value");
22759 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22760 msg
= _("symbol %s is in a different section");
22761 else if (S_IS_WEAK (fixP
->fx_addsy
))
22762 msg
= _("symbol %s is weak and may be overridden later");
22766 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22767 msg
, S_GET_NAME (fixP
->fx_addsy
));
22772 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22774 /* If the offset is negative, we should use encoding A2 for ADR. */
22775 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
22776 newimm
= negate_data_op (&temp
, value
);
22779 newimm
= encode_arm_immediate (value
);
22781 /* If the instruction will fail, see if we can fix things up by
22782 changing the opcode. */
22783 if (newimm
== (unsigned int) FAIL
)
22784 newimm
= negate_data_op (&temp
, value
);
22785 /* MOV accepts both ARM modified immediate (A1 encoding) and
22786 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
22787 When disassembling, MOV is preferred when there is no encoding
22789 if (newimm
== (unsigned int) FAIL
22790 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
22791 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
22792 && !((temp
>> SBIT_SHIFT
) & 0x1)
22793 && value
>= 0 && value
<= 0xffff)
22795 /* Clear bits[23:20] to change encoding from A1 to A2. */
22796 temp
&= 0xff0fffff;
22797 /* Encoding high 4bits imm. Code below will encode the remaining
22799 temp
|= (value
& 0x0000f000) << 4;
22800 newimm
= value
& 0x00000fff;
22804 if (newimm
== (unsigned int) FAIL
)
22806 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22807 _("invalid constant (%lx) after fixup"),
22808 (unsigned long) value
);
22812 newimm
|= (temp
& 0xfffff000);
22813 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22816 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
22818 unsigned int highpart
= 0;
22819 unsigned int newinsn
= 0xe1a00000; /* nop. */
22821 if (fixP
->fx_addsy
)
22823 const char *msg
= 0;
22825 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22826 msg
= _("undefined symbol %s used as an immediate value");
22827 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22828 msg
= _("symbol %s is in a different section");
22829 else if (S_IS_WEAK (fixP
->fx_addsy
))
22830 msg
= _("symbol %s is weak and may be overridden later");
22834 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22835 msg
, S_GET_NAME (fixP
->fx_addsy
));
22840 newimm
= encode_arm_immediate (value
);
22841 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22843 /* If the instruction will fail, see if we can fix things up by
22844 changing the opcode. */
22845 if (newimm
== (unsigned int) FAIL
22846 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
22848 /* No ? OK - try using two ADD instructions to generate
22850 newimm
= validate_immediate_twopart (value
, & highpart
);
22852 /* Yes - then make sure that the second instruction is
22854 if (newimm
!= (unsigned int) FAIL
)
22856 /* Still No ? Try using a negated value. */
22857 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
22858 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
22859 /* Otherwise - give up. */
22862 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22863 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22868 /* Replace the first operand in the 2nd instruction (which
22869 is the PC) with the destination register. We have
22870 already added in the PC in the first instruction and we
22871 do not want to do it again. */
22872 newinsn
&= ~ 0xf0000;
22873 newinsn
|= ((newinsn
& 0x0f000) << 4);
22876 newimm
|= (temp
& 0xfffff000);
22877 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22879 highpart
|= (newinsn
& 0xfffff000);
22880 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
22884 case BFD_RELOC_ARM_OFFSET_IMM
:
22885 if (!fixP
->fx_done
&& seg
->use_rela_p
)
22887 /* Fall through. */
22889 case BFD_RELOC_ARM_LITERAL
:
22895 if (validate_offset_imm (value
, 0) == FAIL
)
22897 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
22898 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22899 _("invalid literal constant: pool needs to be closer"));
22901 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22902 _("bad immediate value for offset (%ld)"),
22907 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22909 newval
&= 0xfffff000;
22912 newval
&= 0xff7ff000;
22913 newval
|= value
| (sign
? INDEX_UP
: 0);
22915 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22918 case BFD_RELOC_ARM_OFFSET_IMM8
:
22919 case BFD_RELOC_ARM_HWLITERAL
:
22925 if (validate_offset_imm (value
, 1) == FAIL
)
22927 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
22928 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22929 _("invalid literal constant: pool needs to be closer"));
22931 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22932 _("bad immediate value for 8-bit offset (%ld)"),
22937 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22939 newval
&= 0xfffff0f0;
22942 newval
&= 0xff7ff0f0;
22943 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
22945 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22948 case BFD_RELOC_ARM_T32_OFFSET_U8
:
22949 if (value
< 0 || value
> 1020 || value
% 4 != 0)
22950 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22951 _("bad immediate value for offset (%ld)"), (long) value
);
22954 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
22956 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
22959 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22960 /* This is a complicated relocation used for all varieties of Thumb32
22961 load/store instruction with immediate offset:
22963 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22964 *4, optional writeback(W)
22965 (doubleword load/store)
22967 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22968 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22969 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22970 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22971 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22973 Uppercase letters indicate bits that are already encoded at
22974 this point. Lowercase letters are our problem. For the
22975 second block of instructions, the secondary opcode nybble
22976 (bits 8..11) is present, and bit 23 is zero, even if this is
22977 a PC-relative operation. */
22978 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22980 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
22982 if ((newval
& 0xf0000000) == 0xe0000000)
22984 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22986 newval
|= (1 << 23);
22989 if (value
% 4 != 0)
22991 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22992 _("offset not a multiple of 4"));
22998 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22999 _("offset out of range"));
23004 else if ((newval
& 0x000f0000) == 0x000f0000)
23006 /* PC-relative, 12-bit offset. */
23008 newval
|= (1 << 23);
23013 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23014 _("offset out of range"));
23019 else if ((newval
& 0x00000100) == 0x00000100)
23021 /* Writeback: 8-bit, +/- offset. */
23023 newval
|= (1 << 9);
23028 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23029 _("offset out of range"));
23034 else if ((newval
& 0x00000f00) == 0x00000e00)
23036 /* T-instruction: positive 8-bit offset. */
23037 if (value
< 0 || value
> 0xff)
23039 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23040 _("offset out of range"));
23048 /* Positive 12-bit or negative 8-bit offset. */
23052 newval
|= (1 << 23);
23062 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23063 _("offset out of range"));
23070 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
23071 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
23074 case BFD_RELOC_ARM_SHIFT_IMM
:
23075 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23076 if (((unsigned long) value
) > 32
23078 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
23080 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23081 _("shift expression is too large"));
23086 /* Shifts of zero must be done as lsl. */
23088 else if (value
== 32)
23090 newval
&= 0xfffff07f;
23091 newval
|= (value
& 0x1f) << 7;
23092 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23095 case BFD_RELOC_ARM_T32_IMMEDIATE
:
23096 case BFD_RELOC_ARM_T32_ADD_IMM
:
23097 case BFD_RELOC_ARM_T32_IMM12
:
23098 case BFD_RELOC_ARM_T32_ADD_PC12
:
23099 /* We claim that this fixup has been processed here,
23100 even if in fact we generate an error because we do
23101 not have a reloc for it, so tc_gen_reloc will reject it. */
23105 && ! S_IS_DEFINED (fixP
->fx_addsy
))
23107 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23108 _("undefined symbol %s used as an immediate value"),
23109 S_GET_NAME (fixP
->fx_addsy
));
23113 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23115 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
23118 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
23119 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23120 Thumb2 modified immediate encoding (T2). */
23121 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
23122 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23124 newimm
= encode_thumb32_immediate (value
);
23125 if (newimm
== (unsigned int) FAIL
)
23126 newimm
= thumb32_negate_data_op (&newval
, value
);
23128 if (newimm
== (unsigned int) FAIL
)
23130 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
23132 /* Turn add/sum into addw/subw. */
23133 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23134 newval
= (newval
& 0xfeffffff) | 0x02000000;
23135 /* No flat 12-bit imm encoding for addsw/subsw. */
23136 if ((newval
& 0x00100000) == 0)
23138 /* 12 bit immediate for addw/subw. */
23142 newval
^= 0x00a00000;
23145 newimm
= (unsigned int) FAIL
;
23152 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23153 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23154 disassembling, MOV is preferred when there is no encoding
23156 NOTE: MOV is using ORR opcode under Thumb 2 mode. */
23157 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
23158 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
23159 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
23160 && value
>= 0 && value
<=0xffff)
23162 /* Toggle bit[25] to change encoding from T2 to T3. */
23164 /* Clear bits[19:16]. */
23165 newval
&= 0xfff0ffff;
23166 /* Encoding high 4bits imm. Code below will encode the
23167 remaining low 12bits. */
23168 newval
|= (value
& 0x0000f000) << 4;
23169 newimm
= value
& 0x00000fff;
23174 if (newimm
== (unsigned int)FAIL
)
23176 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23177 _("invalid constant (%lx) after fixup"),
23178 (unsigned long) value
);
23182 newval
|= (newimm
& 0x800) << 15;
23183 newval
|= (newimm
& 0x700) << 4;
23184 newval
|= (newimm
& 0x0ff);
23186 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
23187 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
23190 case BFD_RELOC_ARM_SMC
:
23191 if (((unsigned long) value
) > 0xffff)
23192 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23193 _("invalid smc expression"));
23194 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23195 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
23196 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23199 case BFD_RELOC_ARM_HVC
:
23200 if (((unsigned long) value
) > 0xffff)
23201 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23202 _("invalid hvc expression"));
23203 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23204 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
23205 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23208 case BFD_RELOC_ARM_SWI
:
23209 if (fixP
->tc_fix_data
!= 0)
23211 if (((unsigned long) value
) > 0xff)
23212 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23213 _("invalid swi expression"));
23214 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23216 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23220 if (((unsigned long) value
) > 0x00ffffff)
23221 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23222 _("invalid swi expression"));
23223 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23225 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23229 case BFD_RELOC_ARM_MULTI
:
23230 if (((unsigned long) value
) > 0xffff)
23231 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23232 _("invalid expression in load/store multiple"));
23233 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
23234 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23238 case BFD_RELOC_ARM_PCREL_CALL
:
23240 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23242 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23243 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23244 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23245 /* Flip the bl to blx. This is a simple flip
23246 bit here because we generate PCREL_CALL for
23247 unconditional bls. */
23249 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23250 newval
= newval
| 0x10000000;
23251 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23257 goto arm_branch_common
;
23259 case BFD_RELOC_ARM_PCREL_JUMP
:
23260 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23262 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23263 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23264 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23266 /* This would map to a bl<cond>, b<cond>,
23267 b<always> to a Thumb function. We
23268 need to force a relocation for this particular
23270 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23273 /* Fall through. */
23275 case BFD_RELOC_ARM_PLT32
:
23277 case BFD_RELOC_ARM_PCREL_BRANCH
:
23279 goto arm_branch_common
;
23281 case BFD_RELOC_ARM_PCREL_BLX
:
23284 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23286 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23287 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23288 && ARM_IS_FUNC (fixP
->fx_addsy
))
23290 /* Flip the blx to a bl and warn. */
23291 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23292 newval
= 0xeb000000;
23293 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23294 _("blx to '%s' an ARM ISA state function changed to bl"),
23296 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23302 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23303 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
23307 /* We are going to store value (shifted right by two) in the
23308 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23309 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23310 also be be clear. */
23312 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23313 _("misaligned branch destination"));
23314 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
23315 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
23316 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23318 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23320 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23321 newval
|= (value
>> 2) & 0x00ffffff;
23322 /* Set the H bit on BLX instructions. */
23326 newval
|= 0x01000000;
23328 newval
&= ~0x01000000;
23330 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23334 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
23335 /* CBZ can only branch forward. */
23337 /* Attempts to use CBZ to branch to the next instruction
23338 (which, strictly speaking, are prohibited) will be turned into
23341 FIXME: It may be better to remove the instruction completely and
23342 perform relaxation. */
23345 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23346 newval
= 0xbf00; /* NOP encoding T1 */
23347 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23352 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23354 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23356 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23357 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
23358 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23363 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
23364 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
23365 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23367 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23369 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23370 newval
|= (value
& 0x1ff) >> 1;
23371 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23375 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
23376 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
23377 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23379 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23381 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23382 newval
|= (value
& 0xfff) >> 1;
23383 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23387 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23389 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23390 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23391 && ARM_IS_FUNC (fixP
->fx_addsy
)
23392 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23394 /* Force a relocation for a branch 20 bits wide. */
23397 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
23398 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23399 _("conditional branch out of range"));
23401 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23404 addressT S
, J1
, J2
, lo
, hi
;
23406 S
= (value
& 0x00100000) >> 20;
23407 J2
= (value
& 0x00080000) >> 19;
23408 J1
= (value
& 0x00040000) >> 18;
23409 hi
= (value
& 0x0003f000) >> 12;
23410 lo
= (value
& 0x00000ffe) >> 1;
23412 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23413 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23414 newval
|= (S
<< 10) | hi
;
23415 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
23416 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23417 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
23421 case BFD_RELOC_THUMB_PCREL_BLX
:
23422 /* If there is a blx from a thumb state function to
23423 another thumb function flip this to a bl and warn
23427 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23428 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23429 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23431 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23432 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23433 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23435 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23436 newval
= newval
| 0x1000;
23437 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23438 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23443 goto thumb_bl_common
;
23445 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23446 /* A bl from Thumb state ISA to an internal ARM state function
23447 is converted to a blx. */
23449 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23450 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23451 && ARM_IS_FUNC (fixP
->fx_addsy
)
23452 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23454 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23455 newval
= newval
& ~0x1000;
23456 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23457 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
23463 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23464 /* For a BLX instruction, make sure that the relocation is rounded up
23465 to a word boundary. This follows the semantics of the instruction
23466 which specifies that bit 1 of the target address will come from bit
23467 1 of the base address. */
23468 value
= (value
+ 3) & ~ 3;
23471 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
23472 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23473 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23476 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
23478 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
23479 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23480 else if ((value
& ~0x1ffffff)
23481 && ((value
& ~0x1ffffff) != ~0x1ffffff))
23482 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23483 _("Thumb2 branch out of range"));
23486 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23487 encode_thumb2_b_bl_offset (buf
, value
);
23491 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23492 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
23493 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23495 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23496 encode_thumb2_b_bl_offset (buf
, value
);
23501 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23506 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23507 md_number_to_chars (buf
, value
, 2);
23511 case BFD_RELOC_ARM_TLS_CALL
:
23512 case BFD_RELOC_ARM_THM_TLS_CALL
:
23513 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23514 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23515 case BFD_RELOC_ARM_TLS_GOTDESC
:
23516 case BFD_RELOC_ARM_TLS_GD32
:
23517 case BFD_RELOC_ARM_TLS_LE32
:
23518 case BFD_RELOC_ARM_TLS_IE32
:
23519 case BFD_RELOC_ARM_TLS_LDM32
:
23520 case BFD_RELOC_ARM_TLS_LDO32
:
23521 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
23524 case BFD_RELOC_ARM_GOT32
:
23525 case BFD_RELOC_ARM_GOTOFF
:
23528 case BFD_RELOC_ARM_GOT_PREL
:
23529 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23530 md_number_to_chars (buf
, value
, 4);
23533 case BFD_RELOC_ARM_TARGET2
:
23534 /* TARGET2 is not partial-inplace, so we need to write the
23535 addend here for REL targets, because it won't be written out
23536 during reloc processing later. */
23537 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23538 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
23542 case BFD_RELOC_RVA
:
23544 case BFD_RELOC_ARM_TARGET1
:
23545 case BFD_RELOC_ARM_ROSEGREL32
:
23546 case BFD_RELOC_ARM_SBREL32
:
23547 case BFD_RELOC_32_PCREL
:
23549 case BFD_RELOC_32_SECREL
:
23551 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23553 /* For WinCE we only do this for pcrel fixups. */
23554 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
23556 md_number_to_chars (buf
, value
, 4);
23560 case BFD_RELOC_ARM_PREL31
:
23561 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23563 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
23564 if ((value
^ (value
>> 1)) & 0x40000000)
23566 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23567 _("rel31 relocation overflow"));
23569 newval
|= value
& 0x7fffffff;
23570 md_number_to_chars (buf
, newval
, 4);
23575 case BFD_RELOC_ARM_CP_OFF_IMM
:
23576 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23577 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
23578 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23580 newval
= get_thumb32_insn (buf
);
23581 if ((newval
& 0x0f200f00) == 0x0d000900)
23583 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23584 has permitted values that are multiples of 2, in the range 0
23586 if (value
< -510 || value
> 510 || (value
& 1))
23587 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23588 _("co-processor offset out of range"));
23590 else if (value
< -1023 || value
> 1023 || (value
& 3))
23591 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23592 _("co-processor offset out of range"));
23597 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23598 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23599 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23601 newval
= get_thumb32_insn (buf
);
23603 newval
&= 0xffffff00;
23606 newval
&= 0xff7fff00;
23607 if ((newval
& 0x0f200f00) == 0x0d000900)
23609 /* This is a fp16 vstr/vldr.
23611 It requires the immediate offset in the instruction is shifted
23612 left by 1 to be a half-word offset.
23614 Here, left shift by 1 first, and later right shift by 2
23615 should get the right offset. */
23618 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
23620 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23621 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23622 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23624 put_thumb32_insn (buf
, newval
);
23627 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
23628 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
23629 if (value
< -255 || value
> 255)
23630 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23631 _("co-processor offset out of range"));
23633 goto cp_off_common
;
23635 case BFD_RELOC_ARM_THUMB_OFFSET
:
23636 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23637 /* Exactly what ranges, and where the offset is inserted depends
23638 on the type of instruction, we can establish this from the
23640 switch (newval
>> 12)
23642 case 4: /* PC load. */
23643 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23644 forced to zero for these loads; md_pcrel_from has already
23645 compensated for this. */
23647 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23648 _("invalid offset, target not word aligned (0x%08lX)"),
23649 (((unsigned long) fixP
->fx_frag
->fr_address
23650 + (unsigned long) fixP
->fx_where
) & ~3)
23651 + (unsigned long) value
);
23653 if (value
& ~0x3fc)
23654 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23655 _("invalid offset, value too big (0x%08lX)"),
23658 newval
|= value
>> 2;
23661 case 9: /* SP load/store. */
23662 if (value
& ~0x3fc)
23663 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23664 _("invalid offset, value too big (0x%08lX)"),
23666 newval
|= value
>> 2;
23669 case 6: /* Word load/store. */
23671 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23672 _("invalid offset, value too big (0x%08lX)"),
23674 newval
|= value
<< 4; /* 6 - 2. */
23677 case 7: /* Byte load/store. */
23679 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23680 _("invalid offset, value too big (0x%08lX)"),
23682 newval
|= value
<< 6;
23685 case 8: /* Halfword load/store. */
23687 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23688 _("invalid offset, value too big (0x%08lX)"),
23690 newval
|= value
<< 5; /* 6 - 1. */
23694 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23695 "Unable to process relocation for thumb opcode: %lx",
23696 (unsigned long) newval
);
23699 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23702 case BFD_RELOC_ARM_THUMB_ADD
:
23703 /* This is a complicated relocation, since we use it for all of
23704 the following immediate relocations:
23708 9bit ADD/SUB SP word-aligned
23709 10bit ADD PC/SP word-aligned
23711 The type of instruction being processed is encoded in the
23718 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23720 int rd
= (newval
>> 4) & 0xf;
23721 int rs
= newval
& 0xf;
23722 int subtract
= !!(newval
& 0x8000);
23724 /* Check for HI regs, only very restricted cases allowed:
23725 Adjusting SP, and using PC or SP to get an address. */
23726 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
23727 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
23728 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23729 _("invalid Hi register with immediate"));
23731 /* If value is negative, choose the opposite instruction. */
23735 subtract
= !subtract
;
23737 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23738 _("immediate value out of range"));
23743 if (value
& ~0x1fc)
23744 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23745 _("invalid immediate for stack address calculation"));
23746 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
23747 newval
|= value
>> 2;
23749 else if (rs
== REG_PC
|| rs
== REG_SP
)
23751 /* PR gas/18541. If the addition is for a defined symbol
23752 within range of an ADR instruction then accept it. */
23755 && fixP
->fx_addsy
!= NULL
)
23759 if (! S_IS_DEFINED (fixP
->fx_addsy
)
23760 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
23761 || S_IS_WEAK (fixP
->fx_addsy
))
23763 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23764 _("address calculation needs a strongly defined nearby symbol"));
23768 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23770 /* Round up to the next 4-byte boundary. */
23775 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
23779 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23780 _("symbol too far away"));
23790 if (subtract
|| value
& ~0x3fc)
23791 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23792 _("invalid immediate for address calculation (value = 0x%08lX)"),
23793 (unsigned long) (subtract
? - value
: value
));
23794 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
23796 newval
|= value
>> 2;
23801 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23802 _("immediate value out of range"));
23803 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
23804 newval
|= (rd
<< 8) | value
;
23809 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23810 _("immediate value out of range"));
23811 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
23812 newval
|= rd
| (rs
<< 3) | (value
<< 6);
23815 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23818 case BFD_RELOC_ARM_THUMB_IMM
:
23819 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23820 if (value
< 0 || value
> 255)
23821 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23822 _("invalid immediate: %ld is out of range"),
23825 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23828 case BFD_RELOC_ARM_THUMB_SHIFT
:
23829 /* 5bit shift value (0..32). LSL cannot take 32. */
23830 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
23831 temp
= newval
& 0xf800;
23832 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
23833 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23834 _("invalid shift value: %ld"), (long) value
);
23835 /* Shifts of zero must be encoded as LSL. */
23837 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
23838 /* Shifts of 32 are encoded as zero. */
23839 else if (value
== 32)
23841 newval
|= value
<< 6;
23842 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23845 case BFD_RELOC_VTABLE_INHERIT
:
23846 case BFD_RELOC_VTABLE_ENTRY
:
23850 case BFD_RELOC_ARM_MOVW
:
23851 case BFD_RELOC_ARM_MOVT
:
23852 case BFD_RELOC_ARM_THUMB_MOVW
:
23853 case BFD_RELOC_ARM_THUMB_MOVT
:
23854 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23856 /* REL format relocations are limited to a 16-bit addend. */
23857 if (!fixP
->fx_done
)
23859 if (value
< -0x8000 || value
> 0x7fff)
23860 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23861 _("offset out of range"));
23863 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
23864 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23869 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
23870 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23872 newval
= get_thumb32_insn (buf
);
23873 newval
&= 0xfbf08f00;
23874 newval
|= (value
& 0xf000) << 4;
23875 newval
|= (value
& 0x0800) << 15;
23876 newval
|= (value
& 0x0700) << 4;
23877 newval
|= (value
& 0x00ff);
23878 put_thumb32_insn (buf
, newval
);
23882 newval
= md_chars_to_number (buf
, 4);
23883 newval
&= 0xfff0f000;
23884 newval
|= value
& 0x0fff;
23885 newval
|= (value
& 0xf000) << 4;
23886 md_number_to_chars (buf
, newval
, 4);
23891 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23892 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23893 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23894 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23895 gas_assert (!fixP
->fx_done
);
23898 bfd_boolean is_mov
;
23899 bfd_vma encoded_addend
= value
;
23901 /* Check that addend can be encoded in instruction. */
23902 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
23903 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23904 _("the offset 0x%08lX is not representable"),
23905 (unsigned long) encoded_addend
);
23907 /* Extract the instruction. */
23908 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
23909 is_mov
= (insn
& 0xf800) == 0x2000;
23914 if (!seg
->use_rela_p
)
23915 insn
|= encoded_addend
;
23921 /* Extract the instruction. */
23922 /* Encoding is the following
23927 /* The following conditions must be true :
23932 rd
= (insn
>> 4) & 0xf;
23934 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
23935 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23936 _("Unable to process relocation for thumb opcode: %lx"),
23937 (unsigned long) insn
);
23939 /* Encode as ADD immediate8 thumb 1 code. */
23940 insn
= 0x3000 | (rd
<< 8);
23942 /* Place the encoded addend into the first 8 bits of the
23944 if (!seg
->use_rela_p
)
23945 insn
|= encoded_addend
;
23948 /* Update the instruction. */
23949 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
23953 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23954 case BFD_RELOC_ARM_ALU_PC_G0
:
23955 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23956 case BFD_RELOC_ARM_ALU_PC_G1
:
23957 case BFD_RELOC_ARM_ALU_PC_G2
:
23958 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23959 case BFD_RELOC_ARM_ALU_SB_G0
:
23960 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23961 case BFD_RELOC_ARM_ALU_SB_G1
:
23962 case BFD_RELOC_ARM_ALU_SB_G2
:
23963 gas_assert (!fixP
->fx_done
);
23964 if (!seg
->use_rela_p
)
23967 bfd_vma encoded_addend
;
23968 bfd_vma addend_abs
= abs (value
);
23970 /* Check that the absolute value of the addend can be
23971 expressed as an 8-bit constant plus a rotation. */
23972 encoded_addend
= encode_arm_immediate (addend_abs
);
23973 if (encoded_addend
== (unsigned int) FAIL
)
23974 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23975 _("the offset 0x%08lX is not representable"),
23976 (unsigned long) addend_abs
);
23978 /* Extract the instruction. */
23979 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23981 /* If the addend is positive, use an ADD instruction.
23982 Otherwise use a SUB. Take care not to destroy the S bit. */
23983 insn
&= 0xff1fffff;
23989 /* Place the encoded addend into the first 12 bits of the
23991 insn
&= 0xfffff000;
23992 insn
|= encoded_addend
;
23994 /* Update the instruction. */
23995 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23999 case BFD_RELOC_ARM_LDR_PC_G0
:
24000 case BFD_RELOC_ARM_LDR_PC_G1
:
24001 case BFD_RELOC_ARM_LDR_PC_G2
:
24002 case BFD_RELOC_ARM_LDR_SB_G0
:
24003 case BFD_RELOC_ARM_LDR_SB_G1
:
24004 case BFD_RELOC_ARM_LDR_SB_G2
:
24005 gas_assert (!fixP
->fx_done
);
24006 if (!seg
->use_rela_p
)
24009 bfd_vma addend_abs
= abs (value
);
24011 /* Check that the absolute value of the addend can be
24012 encoded in 12 bits. */
24013 if (addend_abs
>= 0x1000)
24014 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24015 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24016 (unsigned long) addend_abs
);
24018 /* Extract the instruction. */
24019 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24021 /* If the addend is negative, clear bit 23 of the instruction.
24022 Otherwise set it. */
24024 insn
&= ~(1 << 23);
24028 /* Place the absolute value of the addend into the first 12 bits
24029 of the instruction. */
24030 insn
&= 0xfffff000;
24031 insn
|= addend_abs
;
24033 /* Update the instruction. */
24034 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24038 case BFD_RELOC_ARM_LDRS_PC_G0
:
24039 case BFD_RELOC_ARM_LDRS_PC_G1
:
24040 case BFD_RELOC_ARM_LDRS_PC_G2
:
24041 case BFD_RELOC_ARM_LDRS_SB_G0
:
24042 case BFD_RELOC_ARM_LDRS_SB_G1
:
24043 case BFD_RELOC_ARM_LDRS_SB_G2
:
24044 gas_assert (!fixP
->fx_done
);
24045 if (!seg
->use_rela_p
)
24048 bfd_vma addend_abs
= abs (value
);
24050 /* Check that the absolute value of the addend can be
24051 encoded in 8 bits. */
24052 if (addend_abs
>= 0x100)
24053 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24054 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24055 (unsigned long) addend_abs
);
24057 /* Extract the instruction. */
24058 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24060 /* If the addend is negative, clear bit 23 of the instruction.
24061 Otherwise set it. */
24063 insn
&= ~(1 << 23);
24067 /* Place the first four bits of the absolute value of the addend
24068 into the first 4 bits of the instruction, and the remaining
24069 four into bits 8 .. 11. */
24070 insn
&= 0xfffff0f0;
24071 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
24073 /* Update the instruction. */
24074 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24078 case BFD_RELOC_ARM_LDC_PC_G0
:
24079 case BFD_RELOC_ARM_LDC_PC_G1
:
24080 case BFD_RELOC_ARM_LDC_PC_G2
:
24081 case BFD_RELOC_ARM_LDC_SB_G0
:
24082 case BFD_RELOC_ARM_LDC_SB_G1
:
24083 case BFD_RELOC_ARM_LDC_SB_G2
:
24084 gas_assert (!fixP
->fx_done
);
24085 if (!seg
->use_rela_p
)
24088 bfd_vma addend_abs
= abs (value
);
24090 /* Check that the absolute value of the addend is a multiple of
24091 four and, when divided by four, fits in 8 bits. */
24092 if (addend_abs
& 0x3)
24093 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24094 _("bad offset 0x%08lX (must be word-aligned)"),
24095 (unsigned long) addend_abs
);
24097 if ((addend_abs
>> 2) > 0xff)
24098 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24099 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24100 (unsigned long) addend_abs
);
24102 /* Extract the instruction. */
24103 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24105 /* If the addend is negative, clear bit 23 of the instruction.
24106 Otherwise set it. */
24108 insn
&= ~(1 << 23);
24112 /* Place the addend (divided by four) into the first eight
24113 bits of the instruction. */
24114 insn
&= 0xfffffff0;
24115 insn
|= addend_abs
>> 2;
24117 /* Update the instruction. */
24118 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24122 case BFD_RELOC_ARM_V4BX
:
24123 /* This will need to go in the object file. */
24127 case BFD_RELOC_UNUSED
:
24129 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24130 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
24134 /* Translate internal representation of relocation info to BFD target
24138 tc_gen_reloc (asection
*section
, fixS
*fixp
)
24141 bfd_reloc_code_real_type code
;
24143 reloc
= XNEW (arelent
);
24145 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
24146 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
24147 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
24149 if (fixp
->fx_pcrel
)
24151 if (section
->use_rela_p
)
24152 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
24154 fixp
->fx_offset
= reloc
->address
;
24156 reloc
->addend
= fixp
->fx_offset
;
24158 switch (fixp
->fx_r_type
)
24161 if (fixp
->fx_pcrel
)
24163 code
= BFD_RELOC_8_PCREL
;
24166 /* Fall through. */
24169 if (fixp
->fx_pcrel
)
24171 code
= BFD_RELOC_16_PCREL
;
24174 /* Fall through. */
24177 if (fixp
->fx_pcrel
)
24179 code
= BFD_RELOC_32_PCREL
;
24182 /* Fall through. */
24184 case BFD_RELOC_ARM_MOVW
:
24185 if (fixp
->fx_pcrel
)
24187 code
= BFD_RELOC_ARM_MOVW_PCREL
;
24190 /* Fall through. */
24192 case BFD_RELOC_ARM_MOVT
:
24193 if (fixp
->fx_pcrel
)
24195 code
= BFD_RELOC_ARM_MOVT_PCREL
;
24198 /* Fall through. */
24200 case BFD_RELOC_ARM_THUMB_MOVW
:
24201 if (fixp
->fx_pcrel
)
24203 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
24206 /* Fall through. */
24208 case BFD_RELOC_ARM_THUMB_MOVT
:
24209 if (fixp
->fx_pcrel
)
24211 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
24214 /* Fall through. */
24216 case BFD_RELOC_NONE
:
24217 case BFD_RELOC_ARM_PCREL_BRANCH
:
24218 case BFD_RELOC_ARM_PCREL_BLX
:
24219 case BFD_RELOC_RVA
:
24220 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
24221 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
24222 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
24223 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24224 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24225 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24226 case BFD_RELOC_VTABLE_ENTRY
:
24227 case BFD_RELOC_VTABLE_INHERIT
:
24229 case BFD_RELOC_32_SECREL
:
24231 code
= fixp
->fx_r_type
;
24234 case BFD_RELOC_THUMB_PCREL_BLX
:
24236 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
24237 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24240 code
= BFD_RELOC_THUMB_PCREL_BLX
;
24243 case BFD_RELOC_ARM_LITERAL
:
24244 case BFD_RELOC_ARM_HWLITERAL
:
24245 /* If this is called then the a literal has
24246 been referenced across a section boundary. */
24247 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24248 _("literal referenced across section boundary"));
24252 case BFD_RELOC_ARM_TLS_CALL
:
24253 case BFD_RELOC_ARM_THM_TLS_CALL
:
24254 case BFD_RELOC_ARM_TLS_DESCSEQ
:
24255 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
24256 case BFD_RELOC_ARM_GOT32
:
24257 case BFD_RELOC_ARM_GOTOFF
:
24258 case BFD_RELOC_ARM_GOT_PREL
:
24259 case BFD_RELOC_ARM_PLT32
:
24260 case BFD_RELOC_ARM_TARGET1
:
24261 case BFD_RELOC_ARM_ROSEGREL32
:
24262 case BFD_RELOC_ARM_SBREL32
:
24263 case BFD_RELOC_ARM_PREL31
:
24264 case BFD_RELOC_ARM_TARGET2
:
24265 case BFD_RELOC_ARM_TLS_LDO32
:
24266 case BFD_RELOC_ARM_PCREL_CALL
:
24267 case BFD_RELOC_ARM_PCREL_JUMP
:
24268 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
24269 case BFD_RELOC_ARM_ALU_PC_G0
:
24270 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
24271 case BFD_RELOC_ARM_ALU_PC_G1
:
24272 case BFD_RELOC_ARM_ALU_PC_G2
:
24273 case BFD_RELOC_ARM_LDR_PC_G0
:
24274 case BFD_RELOC_ARM_LDR_PC_G1
:
24275 case BFD_RELOC_ARM_LDR_PC_G2
:
24276 case BFD_RELOC_ARM_LDRS_PC_G0
:
24277 case BFD_RELOC_ARM_LDRS_PC_G1
:
24278 case BFD_RELOC_ARM_LDRS_PC_G2
:
24279 case BFD_RELOC_ARM_LDC_PC_G0
:
24280 case BFD_RELOC_ARM_LDC_PC_G1
:
24281 case BFD_RELOC_ARM_LDC_PC_G2
:
24282 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
24283 case BFD_RELOC_ARM_ALU_SB_G0
:
24284 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
24285 case BFD_RELOC_ARM_ALU_SB_G1
:
24286 case BFD_RELOC_ARM_ALU_SB_G2
:
24287 case BFD_RELOC_ARM_LDR_SB_G0
:
24288 case BFD_RELOC_ARM_LDR_SB_G1
:
24289 case BFD_RELOC_ARM_LDR_SB_G2
:
24290 case BFD_RELOC_ARM_LDRS_SB_G0
:
24291 case BFD_RELOC_ARM_LDRS_SB_G1
:
24292 case BFD_RELOC_ARM_LDRS_SB_G2
:
24293 case BFD_RELOC_ARM_LDC_SB_G0
:
24294 case BFD_RELOC_ARM_LDC_SB_G1
:
24295 case BFD_RELOC_ARM_LDC_SB_G2
:
24296 case BFD_RELOC_ARM_V4BX
:
24297 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24298 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24299 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24300 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24301 code
= fixp
->fx_r_type
;
24304 case BFD_RELOC_ARM_TLS_GOTDESC
:
24305 case BFD_RELOC_ARM_TLS_GD32
:
24306 case BFD_RELOC_ARM_TLS_LE32
:
24307 case BFD_RELOC_ARM_TLS_IE32
:
24308 case BFD_RELOC_ARM_TLS_LDM32
:
24309 /* BFD will include the symbol's address in the addend.
24310 But we don't want that, so subtract it out again here. */
24311 if (!S_IS_COMMON (fixp
->fx_addsy
))
24312 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
24313 code
= fixp
->fx_r_type
;
24317 case BFD_RELOC_ARM_IMMEDIATE
:
24318 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24319 _("internal relocation (type: IMMEDIATE) not fixed up"));
24322 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
24323 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24324 _("ADRL used for a symbol not defined in the same file"));
24327 case BFD_RELOC_ARM_OFFSET_IMM
:
24328 if (section
->use_rela_p
)
24330 code
= fixp
->fx_r_type
;
24334 if (fixp
->fx_addsy
!= NULL
24335 && !S_IS_DEFINED (fixp
->fx_addsy
)
24336 && S_IS_LOCAL (fixp
->fx_addsy
))
24338 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24339 _("undefined local label `%s'"),
24340 S_GET_NAME (fixp
->fx_addsy
));
24344 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24345 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
24352 switch (fixp
->fx_r_type
)
24354 case BFD_RELOC_NONE
: type
= "NONE"; break;
24355 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
24356 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
24357 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
24358 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
24359 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
24360 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
24361 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
24362 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
24363 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
24364 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
24365 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
24366 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
24367 default: type
= _("<unknown>"); break;
24369 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24370 _("cannot represent %s relocation in this object file format"),
24377 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
24379 && fixp
->fx_addsy
== GOT_symbol
)
24381 code
= BFD_RELOC_ARM_GOTPC
;
24382 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
24386 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
24388 if (reloc
->howto
== NULL
)
24390 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24391 _("cannot represent %s relocation in this object file format"),
24392 bfd_get_reloc_code_name (code
));
24396 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
24397 vtable entry to be used in the relocation's section offset. */
24398 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24399 reloc
->address
= fixp
->fx_offset
;
24404 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24407 cons_fix_new_arm (fragS
* frag
,
24411 bfd_reloc_code_real_type reloc
)
24416 FIXME: @@ Should look at CPU word size. */
24420 reloc
= BFD_RELOC_8
;
24423 reloc
= BFD_RELOC_16
;
24427 reloc
= BFD_RELOC_32
;
24430 reloc
= BFD_RELOC_64
;
24435 if (exp
->X_op
== O_secrel
)
24437 exp
->X_op
= O_symbol
;
24438 reloc
= BFD_RELOC_32_SECREL
;
24442 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
24445 #if defined (OBJ_COFF)
24447 arm_validate_fix (fixS
* fixP
)
24449 /* If the destination of the branch is a defined symbol which does not have
24450 the THUMB_FUNC attribute, then we must be calling a function which has
24451 the (interfacearm) attribute. We look for the Thumb entry point to that
24452 function and change the branch to refer to that function instead. */
24453 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
24454 && fixP
->fx_addsy
!= NULL
24455 && S_IS_DEFINED (fixP
->fx_addsy
)
24456 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
24458 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
24465 arm_force_relocation (struct fix
* fixp
)
24467 #if defined (OBJ_COFF) && defined (TE_PE)
24468 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
24472 /* In case we have a call or a branch to a function in ARM ISA mode from
24473 a thumb function or vice-versa force the relocation. These relocations
24474 are cleared off for some cores that might have blx and simple transformations
24478 switch (fixp
->fx_r_type
)
24480 case BFD_RELOC_ARM_PCREL_JUMP
:
24481 case BFD_RELOC_ARM_PCREL_CALL
:
24482 case BFD_RELOC_THUMB_PCREL_BLX
:
24483 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
24487 case BFD_RELOC_ARM_PCREL_BLX
:
24488 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24489 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24490 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24491 if (ARM_IS_FUNC (fixp
->fx_addsy
))
24500 /* Resolve these relocations even if the symbol is extern or weak.
24501 Technically this is probably wrong due to symbol preemption.
24502 In practice these relocations do not have enough range to be useful
24503 at dynamic link time, and some code (e.g. in the Linux kernel)
24504 expects these references to be resolved. */
24505 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
24506 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
24507 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
24508 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
24509 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24510 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
24511 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
24512 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
24513 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24514 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
24515 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
24516 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
24517 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
24518 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
24521 /* Always leave these relocations for the linker. */
24522 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24523 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24524 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24527 /* Always generate relocations against function symbols. */
24528 if (fixp
->fx_r_type
== BFD_RELOC_32
24530 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
24533 return generic_force_reloc (fixp
);
24536 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24537 /* Relocations against function names must be left unadjusted,
24538 so that the linker can use this information to generate interworking
24539 stubs. The MIPS version of this function
24540 also prevents relocations that are mips-16 specific, but I do not
24541 know why it does this.
24544 There is one other problem that ought to be addressed here, but
24545 which currently is not: Taking the address of a label (rather
24546 than a function) and then later jumping to that address. Such
24547 addresses also ought to have their bottom bit set (assuming that
24548 they reside in Thumb code), but at the moment they will not. */
24551 arm_fix_adjustable (fixS
* fixP
)
24553 if (fixP
->fx_addsy
== NULL
)
24556 /* Preserve relocations against symbols with function type. */
24557 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
24560 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
24561 && fixP
->fx_subsy
== NULL
)
24564 /* We need the symbol name for the VTABLE entries. */
24565 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
24566 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24569 /* Don't allow symbols to be discarded on GOT related relocs. */
24570 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
24571 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
24572 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
24573 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
24574 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
24575 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
24576 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
24577 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
24578 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
24579 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
24580 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
24581 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
24582 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
24583 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
24586 /* Similarly for group relocations. */
24587 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24588 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24589 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24592 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24593 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
24594 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24595 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
24596 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
24597 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24598 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
24599 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
24600 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
24603 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24604 offsets, so keep these symbols. */
24605 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24606 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
24611 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24615 elf32_arm_target_format (void)
24618 return (target_big_endian
24619 ? "elf32-bigarm-symbian"
24620 : "elf32-littlearm-symbian");
24621 #elif defined (TE_VXWORKS)
24622 return (target_big_endian
24623 ? "elf32-bigarm-vxworks"
24624 : "elf32-littlearm-vxworks");
24625 #elif defined (TE_NACL)
24626 return (target_big_endian
24627 ? "elf32-bigarm-nacl"
24628 : "elf32-littlearm-nacl");
24630 if (target_big_endian
)
24631 return "elf32-bigarm";
24633 return "elf32-littlearm";
24638 armelf_frob_symbol (symbolS
* symp
,
24641 elf_frob_symbol (symp
, puntp
);
24645 /* MD interface: Finalization. */
24650 literal_pool
* pool
;
24652 /* Ensure that all the IT blocks are properly closed. */
24653 check_it_blocks_finished ();
24655 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
24657 /* Put it at the end of the relevant section. */
24658 subseg_set (pool
->section
, pool
->sub_section
);
24660 arm_elf_change_section ();
24667 /* Remove any excess mapping symbols generated for alignment frags in
24668 SEC. We may have created a mapping symbol before a zero byte
24669 alignment; remove it if there's a mapping symbol after the
24672 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
24673 void *dummy ATTRIBUTE_UNUSED
)
24675 segment_info_type
*seginfo
= seg_info (sec
);
24678 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
24681 for (fragp
= seginfo
->frchainP
->frch_root
;
24683 fragp
= fragp
->fr_next
)
24685 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
24686 fragS
*next
= fragp
->fr_next
;
24688 /* Variable-sized frags have been converted to fixed size by
24689 this point. But if this was variable-sized to start with,
24690 there will be a fixed-size frag after it. So don't handle
24692 if (sym
== NULL
|| next
== NULL
)
24695 if (S_GET_VALUE (sym
) < next
->fr_address
)
24696 /* Not at the end of this frag. */
24698 know (S_GET_VALUE (sym
) == next
->fr_address
);
24702 if (next
->tc_frag_data
.first_map
!= NULL
)
24704 /* Next frag starts with a mapping symbol. Discard this
24706 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24710 if (next
->fr_next
== NULL
)
24712 /* This mapping symbol is at the end of the section. Discard
24714 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
24715 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24719 /* As long as we have empty frags without any mapping symbols,
24721 /* If the next frag is non-empty and does not start with a
24722 mapping symbol, then this mapping symbol is required. */
24723 if (next
->fr_address
!= next
->fr_next
->fr_address
)
24726 next
= next
->fr_next
;
24728 while (next
!= NULL
);
24733 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24737 arm_adjust_symtab (void)
24742 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24744 if (ARM_IS_THUMB (sym
))
24746 if (THUMB_IS_FUNC (sym
))
24748 /* Mark the symbol as a Thumb function. */
24749 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
24750 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
24751 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
24753 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
24754 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
24756 as_bad (_("%s: unexpected function type: %d"),
24757 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
24759 else switch (S_GET_STORAGE_CLASS (sym
))
24762 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
24765 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
24768 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
24776 if (ARM_IS_INTERWORK (sym
))
24777 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
24784 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24786 if (ARM_IS_THUMB (sym
))
24788 elf_symbol_type
* elf_sym
;
24790 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
24791 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
24793 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
24794 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
24796 /* If it's a .thumb_func, declare it as so,
24797 otherwise tag label as .code 16. */
24798 if (THUMB_IS_FUNC (sym
))
24799 ARM_SET_SYM_BRANCH_TYPE (elf_sym
->internal_elf_sym
.st_target_internal
,
24800 ST_BRANCH_TO_THUMB
);
24801 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
24802 elf_sym
->internal_elf_sym
.st_info
=
24803 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
24808 /* Remove any overlapping mapping symbols generated by alignment frags. */
24809 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
24810 /* Now do generic ELF adjustments. */
24811 elf_adjust_symtab ();
24815 /* MD interface: Initialization. */
24818 set_constant_flonums (void)
24822 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
24823 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
24827 /* Auto-select Thumb mode if it's the only available instruction set for the
24828 given architecture. */
24831 autoselect_thumb_from_cpu_variant (void)
24833 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
24834 opcode_select (16);
24843 if ( (arm_ops_hsh
= hash_new ()) == NULL
24844 || (arm_cond_hsh
= hash_new ()) == NULL
24845 || (arm_shift_hsh
= hash_new ()) == NULL
24846 || (arm_psr_hsh
= hash_new ()) == NULL
24847 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
24848 || (arm_reg_hsh
= hash_new ()) == NULL
24849 || (arm_reloc_hsh
= hash_new ()) == NULL
24850 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
24851 as_fatal (_("virtual memory exhausted"));
24853 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
24854 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
24855 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
24856 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
24857 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
24858 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
24859 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
24860 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
24861 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
24862 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
24863 (void *) (v7m_psrs
+ i
));
24864 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
24865 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
24867 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
24869 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
24870 (void *) (barrier_opt_names
+ i
));
24872 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
24874 struct reloc_entry
* entry
= reloc_names
+ i
;
24876 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
24877 /* This makes encode_branch() use the EABI versions of this relocation. */
24878 entry
->reloc
= BFD_RELOC_UNUSED
;
24880 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
24884 set_constant_flonums ();
24886 /* Set the cpu variant based on the command-line options. We prefer
24887 -mcpu= over -march= if both are set (as for GCC); and we prefer
24888 -mfpu= over any other way of setting the floating point unit.
24889 Use of legacy options with new options are faulted. */
24892 if (mcpu_cpu_opt
|| march_cpu_opt
)
24893 as_bad (_("use of old and new-style options to set CPU type"));
24895 mcpu_cpu_opt
= legacy_cpu
;
24897 else if (!mcpu_cpu_opt
)
24898 mcpu_cpu_opt
= march_cpu_opt
;
24903 as_bad (_("use of old and new-style options to set FPU type"));
24905 mfpu_opt
= legacy_fpu
;
24907 else if (!mfpu_opt
)
24909 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24910 || defined (TE_NetBSD) || defined (TE_VXWORKS))
24911 /* Some environments specify a default FPU. If they don't, infer it
24912 from the processor. */
24914 mfpu_opt
= mcpu_fpu_opt
;
24916 mfpu_opt
= march_fpu_opt
;
24918 mfpu_opt
= &fpu_default
;
24924 if (mcpu_cpu_opt
!= NULL
)
24925 mfpu_opt
= &fpu_default
;
24926 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
24927 mfpu_opt
= &fpu_arch_vfp_v2
;
24929 mfpu_opt
= &fpu_arch_fpa
;
24935 mcpu_cpu_opt
= &cpu_default
;
24936 selected_cpu
= cpu_default
;
24938 else if (no_cpu_selected ())
24939 selected_cpu
= cpu_default
;
24942 selected_cpu
= *mcpu_cpu_opt
;
24944 mcpu_cpu_opt
= &arm_arch_any
;
24947 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
24949 autoselect_thumb_from_cpu_variant ();
24951 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
24953 #if defined OBJ_COFF || defined OBJ_ELF
24955 unsigned int flags
= 0;
24957 #if defined OBJ_ELF
24958 flags
= meabi_flags
;
24960 switch (meabi_flags
)
24962 case EF_ARM_EABI_UNKNOWN
:
24964 /* Set the flags in the private structure. */
24965 if (uses_apcs_26
) flags
|= F_APCS26
;
24966 if (support_interwork
) flags
|= F_INTERWORK
;
24967 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
24968 if (pic_code
) flags
|= F_PIC
;
24969 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
24970 flags
|= F_SOFT_FLOAT
;
24972 switch (mfloat_abi_opt
)
24974 case ARM_FLOAT_ABI_SOFT
:
24975 case ARM_FLOAT_ABI_SOFTFP
:
24976 flags
|= F_SOFT_FLOAT
;
24979 case ARM_FLOAT_ABI_HARD
:
24980 if (flags
& F_SOFT_FLOAT
)
24981 as_bad (_("hard-float conflicts with specified fpu"));
24985 /* Using pure-endian doubles (even if soft-float). */
24986 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
24987 flags
|= F_VFP_FLOAT
;
24989 #if defined OBJ_ELF
24990 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
24991 flags
|= EF_ARM_MAVERICK_FLOAT
;
24994 case EF_ARM_EABI_VER4
:
24995 case EF_ARM_EABI_VER5
:
24996 /* No additional flags to set. */
25003 bfd_set_private_flags (stdoutput
, flags
);
25005 /* We have run out flags in the COFF header to encode the
25006 status of ATPCS support, so instead we create a dummy,
25007 empty, debug section called .arm.atpcs. */
25012 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
25016 bfd_set_section_flags
25017 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
25018 bfd_set_section_size (stdoutput
, sec
, 0);
25019 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
25025 /* Record the CPU type as well. */
25026 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
25027 mach
= bfd_mach_arm_iWMMXt2
;
25028 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
25029 mach
= bfd_mach_arm_iWMMXt
;
25030 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
25031 mach
= bfd_mach_arm_XScale
;
25032 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
25033 mach
= bfd_mach_arm_ep9312
;
25034 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
25035 mach
= bfd_mach_arm_5TE
;
25036 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
25038 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
25039 mach
= bfd_mach_arm_5T
;
25041 mach
= bfd_mach_arm_5
;
25043 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
25045 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
25046 mach
= bfd_mach_arm_4T
;
25048 mach
= bfd_mach_arm_4
;
25050 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
25051 mach
= bfd_mach_arm_3M
;
25052 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
25053 mach
= bfd_mach_arm_3
;
25054 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
25055 mach
= bfd_mach_arm_2a
;
25056 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
25057 mach
= bfd_mach_arm_2
;
25059 mach
= bfd_mach_arm_unknown
;
25061 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
25064 /* Command line processing. */
25067 Invocation line includes a switch not recognized by the base assembler.
25068 See if it's a processor-specific option.
25070 This routine is somewhat complicated by the need for backwards
25071 compatibility (since older releases of gcc can't be changed).
25072 The new options try to make the interface as compatible as
25075 New options (supported) are:
25077 -mcpu=<cpu name> Assemble for selected processor
25078 -march=<architecture name> Assemble for selected architecture
25079 -mfpu=<fpu architecture> Assemble for selected FPU.
25080 -EB/-mbig-endian Big-endian
25081 -EL/-mlittle-endian Little-endian
25082 -k Generate PIC code
25083 -mthumb Start in Thumb mode
25084 -mthumb-interwork Code supports ARM/Thumb interworking
25086 -m[no-]warn-deprecated Warn about deprecated features
25087 -m[no-]warn-syms Warn when symbols match instructions
25089 For now we will also provide support for:
25091 -mapcs-32 32-bit Program counter
25092 -mapcs-26 26-bit Program counter
 25093 -mapcs-float Floats passed in FP registers
25094 -mapcs-reentrant Reentrant code
25096 (sometime these will probably be replaced with -mapcs=<list of options>
25097 and -matpcs=<list of options>)
 25099 The remaining options are only supported for backwards compatibility.
25100 Cpu variants, the arm part is optional:
25101 -m[arm]1 Currently not supported.
25102 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25103 -m[arm]3 Arm 3 processor
25104 -m[arm]6[xx], Arm 6 processors
25105 -m[arm]7[xx][t][[d]m] Arm 7 processors
25106 -m[arm]8[10] Arm 8 processors
25107 -m[arm]9[20][tdmi] Arm 9 processors
25108 -mstrongarm[110[0]] StrongARM processors
25109 -mxscale XScale processors
25110 -m[arm]v[2345[t[e]]] Arm architectures
25111 -mall All (except the ARM1)
25113 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25114 -mfpe-old (No float load/store multiples)
25115 -mvfpxd VFP Single precision
25117 -mno-fpu Disable all floating point instructions
25119 The following CPU names are recognized:
25120 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25121 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25122 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25123 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25124 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25125 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25126 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
/* Single-letter options: -m<arg> (machine selection, argument required)
   and -k (generate PIC code).  */
const char * md_shortopts = "m:k";
25132 #ifdef ARM_BI_ENDIAN
25133 #define OPTION_EB (OPTION_MD_BASE + 0)
25134 #define OPTION_EL (OPTION_MD_BASE + 1)
25136 #if TARGET_BYTES_BIG_ENDIAN
25137 #define OPTION_EB (OPTION_MD_BASE + 0)
25139 #define OPTION_EL (OPTION_MD_BASE + 1)
25142 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25144 struct option md_longopts
[] =
25147 {"EB", no_argument
, NULL
, OPTION_EB
},
25150 {"EL", no_argument
, NULL
, OPTION_EL
},
25152 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
25153 {NULL
, no_argument
, NULL
, 0}
25157 size_t md_longopts_size
= sizeof (md_longopts
);
/* One entry per simple boolean-ish command-line option: matching OPTION
   stores VALUE into *VAR (when VAR is non-NULL) and optionally warns that
   the option is DEPRECATED.  */
struct arm_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
25168 struct arm_option_table arm_opts
[] =
25170 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
25171 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
25172 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25173 &support_interwork
, 1, NULL
},
25174 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
25175 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
25176 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
25178 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
25179 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
25180 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
25181 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
25184 /* These are recognized by the assembler, but have no affect on code. */
25185 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
25186 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
25188 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
25189 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25190 &warn_on_deprecated
, 0, NULL
},
25191 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
25192 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
25193 {NULL
, NULL
, NULL
, 0, NULL
}
25196 struct arm_legacy_option_table
25198 const char *option
; /* Option name to match. */
25199 const arm_feature_set
**var
; /* Variable to change. */
25200 const arm_feature_set value
; /* What to change it to. */
25201 const char *deprecated
; /* If non-null, print this message. */
25204 const struct arm_legacy_option_table arm_legacy_opts
[] =
25206 /* DON'T add any new processors to this list -- we want the whole list
25207 to go away... Add them to the processors table instead. */
25208 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
25209 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
25210 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
25211 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
25212 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
25213 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
25214 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
25215 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
25216 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
25217 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
25218 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
25219 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
25220 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
25221 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
25222 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
25223 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
25224 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
25225 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
25226 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
25227 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
25228 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
25229 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
25230 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
25231 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
25232 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
25233 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
25234 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
25235 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
25236 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
25237 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
25238 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
25239 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
25240 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
25241 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
25242 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
25243 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
25244 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
25245 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
25246 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
25247 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
25248 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
25249 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
25250 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
25251 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
25252 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25253 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25254 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25255 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25256 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25257 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25258 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25259 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25260 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25261 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25262 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25263 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25264 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25265 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25266 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25267 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25268 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25269 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25270 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25271 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25272 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25273 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25274 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25275 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25276 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
25277 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
25278 N_("use -mcpu=strongarm110")},
25279 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
25280 N_("use -mcpu=strongarm1100")},
25281 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
25282 N_("use -mcpu=strongarm1110")},
25283 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
25284 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
25285 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
25287 /* Architecture variants -- don't add any more to this list either. */
25288 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25289 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25290 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25291 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25292 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25293 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25294 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25295 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25296 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25297 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25298 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25299 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25300 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25301 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25302 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25303 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25304 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25305 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25307 /* Floating point variants -- don't add any more to this list either. */
25308 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
25309 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
25310 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
25311 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
25312 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
25314 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
25317 struct arm_cpu_option_table
25321 const arm_feature_set value
;
25322 /* For some CPUs we assume an FPU unless the user explicitly sets
25324 const arm_feature_set default_fpu
;
25325 /* The canonical name of the CPU, or NULL to use NAME converted to upper
25327 const char *canonical_name
;
25330 /* This list should, at a minimum, contain all the cpu names
25331 recognized by GCC. */
25332 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
25333 static const struct arm_cpu_option_table arm_cpus
[] =
25335 ARM_CPU_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
, NULL
),
25336 ARM_CPU_OPT ("arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
),
25337 ARM_CPU_OPT ("arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
),
25338 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
25339 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
25340 ARM_CPU_OPT ("arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25341 ARM_CPU_OPT ("arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25342 ARM_CPU_OPT ("arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25343 ARM_CPU_OPT ("arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25344 ARM_CPU_OPT ("arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25345 ARM_CPU_OPT ("arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25346 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25347 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25348 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25349 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25350 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25351 ARM_CPU_OPT ("arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25352 ARM_CPU_OPT ("arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25353 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25354 ARM_CPU_OPT ("arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25355 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25356 ARM_CPU_OPT ("arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25357 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25358 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25359 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25360 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25361 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25362 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25363 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25364 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25365 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25366 ARM_CPU_OPT ("arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25367 ARM_CPU_OPT ("arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25368 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25369 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25370 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25371 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25372 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25373 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25374 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"),
25375 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25376 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25377 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25378 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25379 ARM_CPU_OPT ("fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25380 ARM_CPU_OPT ("fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25381 /* For V5 or later processors we default to using VFP; but the user
25382 should really set the FPU type explicitly. */
25383 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25384 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25385 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
25386 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
25387 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
25388 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25389 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"),
25390 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25391 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25392 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"),
25393 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25394 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25395 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25396 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25397 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25398 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"),
25399 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25400 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25401 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25402 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
,
25404 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
25405 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25406 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25407 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25408 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25409 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25410 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"),
25411 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
),
25412 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
,
25414 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
),
25415 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, "MPCore"),
25416 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, "MPCore"),
25417 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
),
25418 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
),
25419 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6KZ
, FPU_NONE
, NULL
),
25420 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6KZ
, FPU_ARCH_VFP_V2
, NULL
),
25421 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC
,
25422 FPU_NONE
, "Cortex-A5"),
25423 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25425 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC
,
25426 ARM_FEATURE_COPROC (FPU_VFP_V3
25427 | FPU_NEON_EXT_V1
),
25429 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC
,
25430 ARM_FEATURE_COPROC (FPU_VFP_V3
25431 | FPU_NEON_EXT_V1
),
25433 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25435 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25437 ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25439 ARM_CPU_OPT ("cortex-a32", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25441 ARM_CPU_OPT ("cortex-a35", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25443 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25445 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25447 ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25449 ARM_CPU_OPT ("cortex-a73", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25451 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, "Cortex-R4"),
25452 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R
, FPU_ARCH_VFP_V3D16
,
25454 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV
,
25455 FPU_NONE
, "Cortex-R5"),
25456 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV
,
25457 FPU_ARCH_VFP_V3D16
,
25459 ARM_CPU_OPT ("cortex-r8", ARM_ARCH_V7R_IDIV
,
25460 FPU_ARCH_VFP_V3D16
,
25462 ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M7"),
25463 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M4"),
25464 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, "Cortex-M3"),
25465 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M1"),
25466 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0"),
25467 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0+"),
25468 ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25471 ARM_CPU_OPT ("qdf24xx", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25475 /* ??? XSCALE is really an architecture. */
25476 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
25477 /* ??? iwmmxt is not a processor. */
25478 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
),
25479 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
),
25480 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
25482 ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
25483 FPU_ARCH_MAVERICK
, "ARM920T"),
25484 /* Marvell processors. */
25485 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
25487 ARM_EXT2_V6T2_V8M
),
25488 FPU_ARCH_VFP_V3D16
, NULL
),
25489 ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
25491 ARM_EXT2_V6T2_V8M
),
25492 FPU_ARCH_NEON_VFP_V4
, NULL
),
25493 /* APM X-Gene family. */
25494 ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25496 ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25499 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
25503 struct arm_arch_option_table
25507 const arm_feature_set value
;
25508 const arm_feature_set default_fpu
;
25511 /* This list should, at a minimum, contain all the architecture names
25512 recognized by GCC. */
25513 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
25514 static const struct arm_arch_option_table arm_archs
[] =
25516 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
25517 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
25518 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
25519 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25520 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25521 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
25522 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
25523 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
25524 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
25525 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
25526 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
25527 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
25528 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
25529 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
25530 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
),
25531 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
),
25532 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
),
25533 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25534 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25535 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
),
25536 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
),
25537 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
25538 kept to preserve existing behaviour. */
25539 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25540 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25541 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
),
25542 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
),
25543 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
),
25544 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
25545 kept to preserve existing behaviour. */
25546 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25547 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25548 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
25549 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
25550 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
),
25551 /* The official spelling of the ARMv7 profile variants is the dashed form.
25552 Accept the non-dashed form for compatibility with old toolchains. */
25553 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25554 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
),
25555 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25556 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25557 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25558 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25559 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25560 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
),
25561 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
25562 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
),
25563 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
),
25564 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
),
25565 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
),
25566 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
25567 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
25568 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
),
25569 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25571 #undef ARM_ARCH_OPT
25573 /* ISA extensions in the co-processor and main instruction set space. */
25574 struct arm_option_extension_value_table
25578 const arm_feature_set merge_value
;
25579 const arm_feature_set clear_value
;
25580 /* List of architectures for which an extension is available. ARM_ARCH_NONE
25581 indicates that an extension is available for all architectures while
25582 ARM_ANY marks an empty entry. */
25583 const arm_feature_set allowed_archs
[2];
25586 /* The following table must be in alphabetical order with a NULL last entry.
25588 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
25589 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
25590 static const struct arm_option_extension_value_table arm_extensions
[] =
25592 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25593 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25594 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25595 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
25596 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25597 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
25598 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
25599 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
25600 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
25601 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25602 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25603 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25605 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25606 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25607 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
25608 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
25609 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
25610 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
25611 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
25612 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
25613 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
25614 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
25615 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25616 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25617 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
25618 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
25619 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25620 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25621 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
25622 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
25623 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
25624 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25625 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
25626 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
25627 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25628 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
25629 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
25630 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25631 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25632 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25633 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
25634 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25635 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
25636 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
25637 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25638 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
25640 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
25641 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25642 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
25643 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
25644 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
25648 /* ISA floating-point and Advanced SIMD extensions. */
/* Maps an -mfpu=/.fpu name onto the feature set it enables; the table is
   terminated by a NULL name.  NOTE(review): the struct's opening brace and
   name member are not visible in this extraction.  */
25649 struct arm_option_fpu_value_table
25652 const arm_feature_set value
;
25655 /* This list should, at a minimum, contain all the fpu names
25656 recognized by GCC. */
25657 static const struct arm_option_fpu_value_table arm_fpus
[] =
25659 {"softfpa", FPU_NONE
},
25660 {"fpe", FPU_ARCH_FPE
},
25661 {"fpe2", FPU_ARCH_FPE
},
25662 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
25663 {"fpa", FPU_ARCH_FPA
},
25664 {"fpa10", FPU_ARCH_FPA
},
25665 {"fpa11", FPU_ARCH_FPA
},
25666 {"arm7500fe", FPU_ARCH_FPA
},
25667 {"softvfp", FPU_ARCH_VFP
},
25668 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
25669 {"vfp", FPU_ARCH_VFP_V2
},
25670 {"vfp9", FPU_ARCH_VFP_V2
},
25671 {"vfp3", FPU_ARCH_VFP_V3
}, /* For backwards compatibility. */
25672 {"vfp10", FPU_ARCH_VFP_V2
},
25673 {"vfp10-r0", FPU_ARCH_VFP_V1
},
25674 {"vfpxd", FPU_ARCH_VFP_V1xD
},
25675 {"vfpv2", FPU_ARCH_VFP_V2
},
25676 {"vfpv3", FPU_ARCH_VFP_V3
},
25677 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
25678 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
25679 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
25680 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
25681 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
25682 {"arm1020t", FPU_ARCH_VFP_V1
},
25683 {"arm1020e", FPU_ARCH_VFP_V2
},
25684 {"arm1136jfs", FPU_ARCH_VFP_V2
},
25685 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
25686 {"maverick", FPU_ARCH_MAVERICK
},
25687 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
25688 {"neon-fp16", FPU_ARCH_NEON_FP16
},
25689 {"vfpv4", FPU_ARCH_VFP_V4
},
25690 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
25691 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
25692 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
25693 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
25694 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
25695 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
25696 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
25697 {"crypto-neon-fp-armv8",
25698 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
25699 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
25700 {"crypto-neon-fp-armv8.1",
25701 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
/* Sentinel entry terminating the table.  */
25702 {NULL
, ARM_ARCH_NONE
}
/* Generic name -> integer value mapping used for the float-ABI and EABI
   version option tables below.  NOTE(review): braces, the name/value
   members, and the terminating {NULL, 0} entries are not visible in this
   extraction.  */
25705 struct arm_option_value_table
/* Recognized values for -mfloat-abi=.  */
25711 static const struct arm_option_value_table arm_float_abis
[] =
25713 {"hard", ARM_FLOAT_ABI_HARD
},
25714 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
25715 {"soft", ARM_FLOAT_ABI_SOFT
},
25720 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
25721 static const struct arm_option_value_table arm_eabis
[] =
25723 {"gnu", EF_ARM_EABI_UNKNOWN
},
25724 {"4", EF_ARM_EABI_VER4
},
25725 {"5", EF_ARM_EABI_VER5
},
/* Entry describing one long-form command-line option ("mcpu=", "mfpu=", ...);
   md_parse_option matches the OPTION prefix and dispatches to FUNC with the
   remainder of the argument.  */
25730 struct arm_long_option_table
25732 const char * option
; /* Substring to match. */
25733 const char * help
; /* Help information. */
25734 int (* func
) (const char * subopt
); /* Function to decode sub-option. */
25735 const char * deprecated
; /* If non-null, print this message. */
25739 arm_parse_extension (const char *str
, const arm_feature_set
**opt_p
)
25741 arm_feature_set
*ext_set
= XNEW (arm_feature_set
);
25743 /* We insist on extensions being specified in alphabetical order, and with
25744 extensions being added before being removed. We achieve this by having
25745 the global ARM_EXTENSIONS table in alphabetical order, and using the
25746 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25747 or removing it (0) and only allowing it to change in the order
25749 const struct arm_option_extension_value_table
* opt
= NULL
;
25750 const arm_feature_set arm_any
= ARM_ANY
;
25751 int adding_value
= -1;
25753 /* Copy the feature set, so that we can modify it. */
25754 *ext_set
= **opt_p
;
25757 while (str
!= NULL
&& *str
!= 0)
25764 as_bad (_("invalid architectural extension"));
25769 ext
= strchr (str
, '+');
25774 len
= strlen (str
);
25776 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
25778 if (adding_value
!= 0)
25781 opt
= arm_extensions
;
25789 if (adding_value
== -1)
25792 opt
= arm_extensions
;
25794 else if (adding_value
!= 1)
25796 as_bad (_("must specify extensions to add before specifying "
25797 "those to remove"));
25804 as_bad (_("missing architectural extension"));
25808 gas_assert (adding_value
!= -1);
25809 gas_assert (opt
!= NULL
);
25811 /* Scan over the options table trying to find an exact match. */
25812 for (; opt
->name
!= NULL
; opt
++)
25813 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25815 int i
, nb_allowed_archs
=
25816 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
25817 /* Check we can apply the extension to this architecture. */
25818 for (i
= 0; i
< nb_allowed_archs
; i
++)
25821 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
25823 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *ext_set
))
25826 if (i
== nb_allowed_archs
)
25828 as_bad (_("extension does not apply to the base architecture"));
25832 /* Add or remove the extension. */
25834 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
25836 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
25841 if (opt
->name
== NULL
)
25843 /* Did we fail to find an extension because it wasn't specified in
25844 alphabetical order, or because it does not exist? */
25846 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
25847 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25850 if (opt
->name
== NULL
)
25851 as_bad (_("unknown architectural extension `%s'"), str
);
25853 as_bad (_("architectural extensions must be specified in "
25854 "alphabetical order"));
25860 /* We should skip the extension we've just matched the next time
/* Handler for -mcpu=<name>[+ext...] (dispatched from arm_long_opts).
   Looks up the CPU name in arm_cpus, records its feature set and default
   FPU, fills in selected_cpu_name, and forwards any "+extension" suffix to
   arm_parse_extension.  Diagnoses a missing or unknown CPU name.  */
25872 arm_parse_cpu (const char *str
)
25874 const struct arm_cpu_option_table
*opt
;
/* EXT points at the first "+extension" suffix, if any; only the portion
   before it is compared against the table.  */
25875 const char *ext
= strchr (str
, '+');
25881 len
= strlen (str
);
25885 as_bad (_("missing cpu name `%s'"), str
);
25889 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
25890 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25892 mcpu_cpu_opt
= &opt
->value
;
25893 mcpu_fpu_opt
= &opt
->default_fpu
;
25894 if (opt
->canonical_name
)
25896 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
25897 strcpy (selected_cpu_name
, opt
->canonical_name
);
/* No canonical name: upper-case the table name into selected_cpu_name,
   truncating to the buffer size.  */
25903 if (len
>= sizeof selected_cpu_name
)
25904 len
= (sizeof selected_cpu_name
) - 1;
25906 for (i
= 0; i
< len
; i
++)
25907 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25908 selected_cpu_name
[i
] = 0;
25912 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
25917 as_bad (_("unknown cpu `%s'"), str
);
/* Handler for -march=<name>[+ext...] (dispatched from arm_long_opts).
   Mirrors arm_parse_cpu but records the result in march_cpu_opt /
   march_fpu_opt and copies the architecture name verbatim.  */
25922 arm_parse_arch (const char *str
)
25924 const struct arm_arch_option_table
*opt
;
/* EXT points at the first "+extension" suffix, if any.  */
25925 const char *ext
= strchr (str
, '+');
25931 len
= strlen (str
);
25935 as_bad (_("missing architecture name `%s'"), str
);
25939 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
25940 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25942 march_cpu_opt
= &opt
->value
;
25943 march_fpu_opt
= &opt
->default_fpu
;
25944 strcpy (selected_cpu_name
, opt
->name
);
25947 return arm_parse_extension (ext
, &march_cpu_opt
);
25952 as_bad (_("unknown architecture `%s'\n"), str
);
/* Handler for -mfpu=<name> (dispatched from arm_long_opts).  Scans the
   arm_fpus table for an exact name match and records the chosen feature
   set in mfpu_opt; otherwise diagnoses an unknown FPU name.  */
25957 arm_parse_fpu (const char * str
)
25959 const struct arm_option_fpu_value_table
* opt
;
25961 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
25962 if (streq (opt
->name
, str
))
25964 mfpu_opt
= &opt
->value
;
25968 as_bad (_("unknown floating point format `%s'\n"), str
);
/* Handler for -mfloat-abi=<abi> (dispatched from arm_long_opts).  Matches
   STR against arm_float_abis and stores the value in mfloat_abi_opt;
   note the value is copied, not pointed to.  */
25973 arm_parse_float_abi (const char * str
)
25975 const struct arm_option_value_table
* opt
;
25977 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
25978 if (streq (opt
->name
, str
))
25980 mfloat_abi_opt
= opt
->value
;
25984 as_bad (_("unknown floating point abi `%s'\n"), str
);
/* Handler for -meabi=<ver> (dispatched from arm_long_opts).  Matches STR
   against arm_eabis and records the corresponding ELF header flags in
   meabi_flags.  */
25990 arm_parse_eabi (const char * str
)
25992 const struct arm_option_value_table
*opt
;
25994 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
25995 if (streq (opt
->name
, str
))
25997 meabi_flags
= opt
->value
;
26000 as_bad (_("unknown EABI `%s'\n"), str
);
/* Handler for -mimplicit-it=<mode> (dispatched from arm_long_opts).
   Sets implicit_it_mode to one of ARM / THUMB / ALWAYS / NEVER, or emits
   a diagnostic for any other mode string.  */
26006 arm_parse_it_mode (const char * str
)
26008 bfd_boolean ret
= TRUE
;
26010 if (streq ("arm", str
))
26011 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
26012 else if (streq ("thumb", str
))
26013 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
26014 else if (streq ("always", str
))
26015 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
26016 else if (streq ("never", str
))
26017 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
26020 as_bad (_("unknown implicit IT mode `%s', should be "\
26021 "arm, thumb, always, or never."), str
);
/* Handler for -mccs (dispatched from arm_long_opts).  Enables TI
   CodeComposer Studio syntax compatibility: ';' starts a comment and the
   line-separator set is cleared.  */
26029 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
26031 codecomposer_syntax
= TRUE
;
26032 arm_comment_chars
[0] = ';';
26033 arm_line_separator_chars
[0] = 0;
/* Long-form option table consulted by md_parse_option; each entry pairs an
   option prefix with its help text and sub-option parser.  Terminated by a
   NULL entry.  */
26037 struct arm_long_option_table arm_long_opts
[] =
26039 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
26040 arm_parse_cpu
, NULL
},
26041 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
26042 arm_parse_arch
, NULL
},
26043 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
26044 arm_parse_fpu
, NULL
},
26045 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
26046 arm_parse_float_abi
, NULL
},
26048 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
26049 arm_parse_eabi
, NULL
},
26051 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
26052 arm_parse_it_mode
, NULL
},
26053 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
26054 arm_ccs_mode
, NULL
},
26055 {NULL
, NULL
, 0, NULL
}
26059 md_parse_option (int c
, const char * arg
)
26061 struct arm_option_table
*opt
;
26062 const struct arm_legacy_option_table
*fopt
;
26063 struct arm_long_option_table
*lopt
;
26069 target_big_endian
= 1;
26075 target_big_endian
= 0;
26079 case OPTION_FIX_V4BX
:
26084 /* Listing option. Just ignore these, we don't support additional
26089 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
26091 if (c
== opt
->option
[0]
26092 && ((arg
== NULL
&& opt
->option
[1] == 0)
26093 || streq (arg
, opt
->option
+ 1)))
26095 /* If the option is deprecated, tell the user. */
26096 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
26097 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
26098 arg
? arg
: "", _(opt
->deprecated
));
26100 if (opt
->var
!= NULL
)
26101 *opt
->var
= opt
->value
;
26107 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
26109 if (c
== fopt
->option
[0]
26110 && ((arg
== NULL
&& fopt
->option
[1] == 0)
26111 || streq (arg
, fopt
->option
+ 1)))
26113 /* If the option is deprecated, tell the user. */
26114 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
26115 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
26116 arg
? arg
: "", _(fopt
->deprecated
));
26118 if (fopt
->var
!= NULL
)
26119 *fopt
->var
= &fopt
->value
;
26125 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
26127 /* These options are expected to have an argument. */
26128 if (c
== lopt
->option
[0]
26130 && strncmp (arg
, lopt
->option
+ 1,
26131 strlen (lopt
->option
+ 1)) == 0)
26133 /* If the option is deprecated, tell the user. */
26134 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
26135 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
26136 _(lopt
->deprecated
));
26138 /* Call the sup-option parser. */
26139 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
/* GAS target hook: print the ARM-specific option summary to FP.  Walks the
   short-option table (arm_opts) and the long-option table (arm_long_opts),
   printing each entry that has help text, then the fixed -EB/-EL/--fix-v4bx
   lines.  */
26150 md_show_usage (FILE * fp
)
26152 struct arm_option_table
*opt
;
26153 struct arm_long_option_table
*lopt
;
26155 fprintf (fp
, _(" ARM-specific assembler options:\n"));
26157 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
26158 if (opt
->help
!= NULL
)
26159 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
26161 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
26162 if (lopt
->help
!= NULL
)
26163 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
26167 -EB assemble code for a big-endian cpu\n"));
26172 -EL assemble code for a little-endian cpu\n"));
26176 --fix-v4bx Allow BX in ARMv4 code\n"));
26184 arm_feature_set flags
;
26185 } cpu_arch_ver_table
;
26187 /* Mapping from CPU features to EABI CPU arch values. As a general rule, table
26188 must be sorted least features first but some reordering is needed, eg. for
26189 Thumb-2 instructions to be detected as coming from ARMv6T2. */
26190 static const cpu_arch_ver_table cpu_arch_ver
[] =
26196 {4, ARM_ARCH_V5TE
},
26197 {5, ARM_ARCH_V5TEJ
},
26201 {11, ARM_ARCH_V6M
},
26202 {12, ARM_ARCH_V6SM
},
26203 {8, ARM_ARCH_V6T2
},
26204 {10, ARM_ARCH_V7VE
},
26205 {10, ARM_ARCH_V7R
},
26206 {10, ARM_ARCH_V7M
},
26207 {14, ARM_ARCH_V8A
},
26208 {16, ARM_ARCH_V8M_BASE
},
26209 {17, ARM_ARCH_V8M_MAIN
},
26213 /* Set an attribute if it has not already been set by the user. */
/* Integer variant: writes TAG=VALUE into the output's processor-specific
   attribute section unless the user set it explicitly.  NOTE(review): the
   first operand of the condition (before the "||") is not visible in this
   extraction — presumably a lower-bound check on TAG; confirm against the
   full source.  */
26215 aeabi_set_attribute_int (int tag
, int value
)
26218 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
26219 || !attributes_set_explicitly
[tag
])
26220 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
/* String variant of aeabi_set_attribute_int: writes TAG=VALUE as a string
   attribute unless the user set the tag explicitly.  NOTE(review): the
   first operand of the condition is not visible in this extraction.  */
26224 aeabi_set_attribute_string (int tag
, const char *value
)
26227 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
26228 || !attributes_set_explicitly
[tag
])
26229 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
26232 /* Set the public EABI object attributes. */
26234 aeabi_set_public_attributes (void)
26239 int fp16_optional
= 0;
26240 arm_feature_set arm_arch
= ARM_ARCH_NONE
;
26241 arm_feature_set flags
;
26242 arm_feature_set tmp
;
26243 arm_feature_set arm_arch_v8m_base
= ARM_ARCH_V8M_BASE
;
26244 const cpu_arch_ver_table
*p
;
26246 /* Choose the architecture based on the capabilities of the requested cpu
26247 (if any) and/or the instructions actually used. */
26248 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
26249 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
26250 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
26252 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
26253 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
26255 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
26256 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
26258 selected_cpu
= flags
;
26260 /* Allow the user to override the reported architecture. */
26263 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
26264 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
26267 /* We need to make sure that the attributes do not identify us as v6S-M
26268 when the only v6S-M feature in use is the Operating System Extensions. */
26269 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_os
))
26270 if (!ARM_CPU_HAS_FEATURE (flags
, arm_arch_v6m_only
))
26271 ARM_CLEAR_FEATURE (flags
, flags
, arm_ext_os
);
26275 for (p
= cpu_arch_ver
; p
->val
; p
++)
26277 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
26280 arm_arch
= p
->flags
;
26281 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
26285 /* The table lookup above finds the last architecture to contribute
26286 a new feature. Unfortunately, Tag13 is a subset of the union of
26287 v6T2 and v7-M, so it is never seen as contributing a new feature.
26288 We can not search for the last entry which is entirely used,
26289 because if no CPU is specified we build up only those flags
26290 actually used. Perhaps we should separate out the specified
26291 and implicit cases. Avoid taking this path for -march=all by
26292 checking for contradictory v7-A / v7-M features. */
26293 if (arch
== TAG_CPU_ARCH_V7
26294 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
26295 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
26296 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
26298 arch
= TAG_CPU_ARCH_V7E_M
;
26299 arm_arch
= (arm_feature_set
) ARM_ARCH_V7EM
;
26302 ARM_CLEAR_FEATURE (tmp
, flags
, arm_arch_v8m_base
);
26303 if (arch
== TAG_CPU_ARCH_V8M_BASE
&& ARM_CPU_HAS_FEATURE (tmp
, arm_arch_any
))
26305 arch
= TAG_CPU_ARCH_V8M_MAIN
;
26306 arm_arch
= (arm_feature_set
) ARM_ARCH_V8M_MAIN
;
26309 /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
26310 coming from ARMv8-A. However, since ARMv8-A has more instructions than
26311 ARMv8-M, -march=all must be detected as ARMv8-A. */
26312 if (arch
== TAG_CPU_ARCH_V8M_MAIN
26313 && ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
26315 arch
= TAG_CPU_ARCH_V8
;
26316 arm_arch
= (arm_feature_set
) ARM_ARCH_V8A
;
26319 /* Tag_CPU_name. */
26320 if (selected_cpu_name
[0])
26324 q
= selected_cpu_name
;
26325 if (strncmp (q
, "armv", 4) == 0)
26330 for (i
= 0; q
[i
]; i
++)
26331 q
[i
] = TOUPPER (q
[i
]);
26333 aeabi_set_attribute_string (Tag_CPU_name
, q
);
26336 /* Tag_CPU_arch. */
26337 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
26339 /* Tag_CPU_arch_profile. */
26340 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
26341 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26342 || (ARM_CPU_HAS_FEATURE (flags
, arm_ext_atomics
)
26343 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
)))
26345 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
26347 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
26352 if (profile
!= '\0')
26353 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
26355 /* Tag_DSP_extension. */
26356 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_dsp
))
26358 arm_feature_set ext
;
26360 /* DSP instructions not in architecture. */
26361 ARM_CLEAR_FEATURE (ext
, flags
, arm_arch
);
26362 if (ARM_CPU_HAS_FEATURE (ext
, arm_ext_dsp
))
26363 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
26366 /* Tag_ARM_ISA_use. */
26367 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
26369 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
26371 /* Tag_THUMB_ISA_use. */
26372 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
26377 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26378 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
26380 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
26384 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
26387 /* Tag_VFP_arch. */
26388 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
26389 aeabi_set_attribute_int (Tag_VFP_arch
,
26390 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
26392 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
26393 aeabi_set_attribute_int (Tag_VFP_arch
,
26394 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
26396 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
26399 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
26401 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
26403 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
26406 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
26407 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
26408 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
26409 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
26410 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
26412 /* Tag_ABI_HardFP_use. */
26413 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
26414 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
26415 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
26417 /* Tag_WMMX_arch. */
26418 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
26419 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
26420 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
26421 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
26423 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
26424 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
26425 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
26426 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
26427 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
26428 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
26430 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
26432 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
26436 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
26441 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
26442 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
26443 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
26447 We set Tag_DIV_use to two when integer divide instructions have been used
26448 in ARM state, or when Thumb integer divide instructions have been used,
26449 but we have no architecture profile set, nor have we any ARM instructions.
26451 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
26452 by the base architecture.
26454 For new architectures we will have to check these tests. */
26455 gas_assert (arch
<= TAG_CPU_ARCH_V8
26456 || (arch
>= TAG_CPU_ARCH_V8M_BASE
26457 && arch
<= TAG_CPU_ARCH_V8M_MAIN
));
26458 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26459 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
26460 aeabi_set_attribute_int (Tag_DIV_use
, 0);
26461 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
26462 || (profile
== '\0'
26463 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
26464 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
26465 aeabi_set_attribute_int (Tag_DIV_use
, 2);
26467 /* Tag_MP_extension_use. */
26468 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
26469 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
26471 /* Tag Virtualization_use. */
26472 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
26474 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
26477 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
26480 /* Add the default contents for the .ARM.attributes section. */
26484 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
26487 aeabi_set_public_attributes ();
26489 #endif /* OBJ_ELF */
26492 /* Parse a .cpu directive. */
/* Reads the CPU name from the input line (temporarily NUL-terminating it),
   looks it up in arm_cpus, and updates mcpu_cpu_opt / selected_cpu /
   selected_cpu_name / cpu_variant accordingly.  Restores the clobbered
   input character on both the success and error paths.  */
26495 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
26497 const struct arm_cpu_option_table
*opt
;
/* Isolate the name token: scan to the first whitespace and patch in a
   temporary NUL terminator.  */
26501 name
= input_line_pointer
;
26502 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26503 input_line_pointer
++;
26504 saved_char
= *input_line_pointer
;
26505 *input_line_pointer
= 0;
26507 /* Skip the first "all" entry. */
26508 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
26509 if (streq (opt
->name
, name
))
26511 mcpu_cpu_opt
= &opt
->value
;
26512 selected_cpu
= opt
->value
;
26513 if (opt
->canonical_name
)
26514 strcpy (selected_cpu_name
, opt
->canonical_name
);
/* No canonical name: store the table name upper-cased.  */
26518 for (i
= 0; opt
->name
[i
]; i
++)
26519 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
26521 selected_cpu_name
[i
] = 0;
26523 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26524 *input_line_pointer
= saved_char
;
26525 demand_empty_rest_of_line ();
/* Error path: unknown CPU name.  */
26528 as_bad (_("unknown cpu `%s'"), name
);
26529 *input_line_pointer
= saved_char
;
26530 ignore_rest_of_line ();
26534 /* Parse a .arch directive. */
/* Same token-isolation scheme as s_arm_cpu, but looks the name up in
   arm_archs and copies the architecture name verbatim into
   selected_cpu_name.  */
26537 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
26539 const struct arm_arch_option_table
*opt
;
26543 name
= input_line_pointer
;
26544 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26545 input_line_pointer
++;
26546 saved_char
= *input_line_pointer
;
26547 *input_line_pointer
= 0;
26549 /* Skip the first "all" entry. */
26550 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26551 if (streq (opt
->name
, name
))
26553 mcpu_cpu_opt
= &opt
->value
;
26554 selected_cpu
= opt
->value
;
26555 strcpy (selected_cpu_name
, opt
->name
);
26556 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26557 *input_line_pointer
= saved_char
;
26558 demand_empty_rest_of_line ();
/* Error path: unknown architecture name.  */
26562 as_bad (_("unknown architecture `%s'\n"), name
);
26563 *input_line_pointer
= saved_char
;
26564 ignore_rest_of_line ();
26568 /* Parse a .object_arch directive. */
/* Records an architecture override for the object file attributes only:
   sets object_arch from arm_archs without touching the CPU selection used
   for assembly.  */
26571 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
26573 const struct arm_arch_option_table
*opt
;
26577 name
= input_line_pointer
;
26578 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26579 input_line_pointer
++;
26580 saved_char
= *input_line_pointer
;
26581 *input_line_pointer
= 0;
26583 /* Skip the first "all" entry. */
26584 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26585 if (streq (opt
->name
, name
))
26587 object_arch
= &opt
->value
;
26588 *input_line_pointer
= saved_char
;
26589 demand_empty_rest_of_line ();
/* Error path: unknown architecture name.  */
26593 as_bad (_("unknown architecture `%s'\n"), name
);
26594 *input_line_pointer
= saved_char
;
26595 ignore_rest_of_line ();
26598 /* Parse a .arch_extension directive. */
26601 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
26603 const struct arm_option_extension_value_table
*opt
;
26604 const arm_feature_set arm_any
= ARM_ANY
;
26607 int adding_value
= 1;
26609 name
= input_line_pointer
;
26610 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26611 input_line_pointer
++;
26612 saved_char
= *input_line_pointer
;
26613 *input_line_pointer
= 0;
26615 if (strlen (name
) >= 2
26616 && strncmp (name
, "no", 2) == 0)
26622 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26623 if (streq (opt
->name
, name
))
26625 int i
, nb_allowed_archs
=
26626 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
26627 for (i
= 0; i
< nb_allowed_archs
; i
++)
26630 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
26632 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *mcpu_cpu_opt
))
26636 if (i
== nb_allowed_archs
)
26638 as_bad (_("architectural extension `%s' is not allowed for the "
26639 "current base architecture"), name
);
26644 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_cpu
,
26647 ARM_CLEAR_FEATURE (selected_cpu
, selected_cpu
, opt
->clear_value
);
26649 mcpu_cpu_opt
= &selected_cpu
;
26650 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26651 *input_line_pointer
= saved_char
;
26652 demand_empty_rest_of_line ();
26656 if (opt
->name
== NULL
)
26657 as_bad (_("unknown architecture extension `%s'\n"), name
);
26659 *input_line_pointer
= saved_char
;
26660 ignore_rest_of_line ();
26663 /* Parse a .fpu directive. */
/* Reads the FPU name (same token-isolation scheme as s_arm_cpu), looks it
   up in arm_fpus, records it in mfpu_opt, and recomputes cpu_variant from
   the current CPU and FPU selections.  */
26666 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
26668 const struct arm_option_fpu_value_table
*opt
;
26672 name
= input_line_pointer
;
26673 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26674 input_line_pointer
++;
26675 saved_char
= *input_line_pointer
;
26676 *input_line_pointer
= 0;
26678 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
26679 if (streq (opt
->name
, name
))
26681 mfpu_opt
= &opt
->value
;
26682 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26683 *input_line_pointer
= saved_char
;
26684 demand_empty_rest_of_line ();
/* Error path: unknown FPU name.  */
26688 as_bad (_("unknown floating point format `%s'\n"), name
);
26689 *input_line_pointer
= saved_char
;
26690 ignore_rest_of_line ();
26693 /* Copy symbol information. */
/* Copies the ARM-specific flag word (ARM/Thumb annotations) from SRC to
   DEST when one symbol is aliased to another.  */
26696 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
26698 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
26702 /* Given a symbolic attribute NAME, return the proper integer value.
26703 Returns -1 if the attribute is not known. */
26706 arm_convert_symbolic_attribute (const char *name
)
26708 static const struct
26713 attribute_table
[] =
26715 /* When you modify this table you should
26716 also modify the list in doc/c-arm.texi. */
26717 #define T(tag) {#tag, tag}
26718 T (Tag_CPU_raw_name
),
26721 T (Tag_CPU_arch_profile
),
26722 T (Tag_ARM_ISA_use
),
26723 T (Tag_THUMB_ISA_use
),
26727 T (Tag_Advanced_SIMD_arch
),
26728 T (Tag_PCS_config
),
26729 T (Tag_ABI_PCS_R9_use
),
26730 T (Tag_ABI_PCS_RW_data
),
26731 T (Tag_ABI_PCS_RO_data
),
26732 T (Tag_ABI_PCS_GOT_use
),
26733 T (Tag_ABI_PCS_wchar_t
),
26734 T (Tag_ABI_FP_rounding
),
26735 T (Tag_ABI_FP_denormal
),
26736 T (Tag_ABI_FP_exceptions
),
26737 T (Tag_ABI_FP_user_exceptions
),
26738 T (Tag_ABI_FP_number_model
),
26739 T (Tag_ABI_align_needed
),
26740 T (Tag_ABI_align8_needed
),
26741 T (Tag_ABI_align_preserved
),
26742 T (Tag_ABI_align8_preserved
),
26743 T (Tag_ABI_enum_size
),
26744 T (Tag_ABI_HardFP_use
),
26745 T (Tag_ABI_VFP_args
),
26746 T (Tag_ABI_WMMX_args
),
26747 T (Tag_ABI_optimization_goals
),
26748 T (Tag_ABI_FP_optimization_goals
),
26749 T (Tag_compatibility
),
26750 T (Tag_CPU_unaligned_access
),
26751 T (Tag_FP_HP_extension
),
26752 T (Tag_VFP_HP_extension
),
26753 T (Tag_ABI_FP_16bit_format
),
26754 T (Tag_MPextension_use
),
26756 T (Tag_nodefaults
),
26757 T (Tag_also_compatible_with
),
26758 T (Tag_conformance
),
26760 T (Tag_Virtualization_use
),
26761 T (Tag_DSP_extension
),
26762 /* We deliberately do not include Tag_MPextension_use_legacy. */
26770 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
26771 if (streq (name
, attribute_table
[i
].name
))
26772 return attribute_table
[i
].tag
;
26778 /* Apply sym value for relocations only in the case that they are for
26779 local symbols in the same segment as the fixup and you have the
26780 respective architectural feature for blx and simple switches. */
26782 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
26785 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
26786 /* PR 17444: If the local symbol is in a different section then a reloc
26787 will always be generated for it, so applying the symbol value now
26788 will result in a double offset being stored in the relocation. */
26789 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
26790 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
26792 switch (fixP
->fx_r_type
)
26794 case BFD_RELOC_ARM_PCREL_BLX
:
26795 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26796 if (ARM_IS_FUNC (fixP
->fx_addsy
))
26800 case BFD_RELOC_ARM_PCREL_CALL
:
26801 case BFD_RELOC_THUMB_PCREL_BLX
:
26802 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
26813 #endif /* OBJ_ELF */