1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS
,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result
;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
124 #define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant
;
127 static arm_feature_set arm_arch_used
;
128 static arm_feature_set thumb_arch_used
;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26
= FALSE
;
132 static int atpcs
= FALSE
;
133 static int support_interwork
= FALSE
;
134 static int uses_apcs_float
= FALSE
;
135 static int pic_code
= FALSE
;
136 static int fix_v4bx
= FALSE
;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated
= TRUE
;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
154 static const arm_feature_set
*object_arch
= NULL
;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
158 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
159 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
160 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
161 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
162 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
163 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
165 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
167 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
170 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
173 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
174 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
175 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
176 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
177 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
178 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
179 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
180 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
181 static const arm_feature_set arm_ext_v4t_5
=
182 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
183 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
184 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
185 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
186 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
187 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
188 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
189 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
190 static const arm_feature_set arm_ext_v6m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
);
191 static const arm_feature_set arm_ext_v6_notm
=
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
193 static const arm_feature_set arm_ext_v6_dsp
=
194 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
195 static const arm_feature_set arm_ext_barrier
=
196 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
197 static const arm_feature_set arm_ext_msr
=
198 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
199 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
200 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
201 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
202 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
204 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
206 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
207 static const arm_feature_set arm_ext_m
=
208 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_OS
| ARM_EXT_V7M
,
209 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
210 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
211 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
212 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
213 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
214 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
215 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
216 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
217 static const arm_feature_set arm_ext_v8m_main
=
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
219 /* Instructions in ARMv8-M only found in M profile architectures. */
220 static const arm_feature_set arm_ext_v8m_m_only
=
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
222 static const arm_feature_set arm_ext_v6t2_v8m
=
223 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
224 /* Instructions shared between ARMv8-A and ARMv8-M. */
225 static const arm_feature_set arm_ext_atomics
=
226 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
228 /* DSP instructions Tag_DSP_extension refers to. */
229 static const arm_feature_set arm_ext_dsp
=
230 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
232 static const arm_feature_set arm_ext_ras
=
233 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
234 /* FP16 instructions. */
235 static const arm_feature_set arm_ext_fp16
=
236 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
238 static const arm_feature_set arm_arch_any
= ARM_ANY
;
239 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
240 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
241 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
243 static const arm_feature_set arm_arch_v6m_only
= ARM_ARCH_V6M_ONLY
;
246 static const arm_feature_set arm_cext_iwmmxt2
=
247 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
248 static const arm_feature_set arm_cext_iwmmxt
=
249 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
250 static const arm_feature_set arm_cext_xscale
=
251 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
252 static const arm_feature_set arm_cext_maverick
=
253 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
254 static const arm_feature_set fpu_fpa_ext_v1
=
255 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
256 static const arm_feature_set fpu_fpa_ext_v2
=
257 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
258 static const arm_feature_set fpu_vfp_ext_v1xd
=
259 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
260 static const arm_feature_set fpu_vfp_ext_v1
=
261 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
262 static const arm_feature_set fpu_vfp_ext_v2
=
263 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
264 static const arm_feature_set fpu_vfp_ext_v3xd
=
265 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
266 static const arm_feature_set fpu_vfp_ext_v3
=
267 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
268 static const arm_feature_set fpu_vfp_ext_d32
=
269 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
270 static const arm_feature_set fpu_neon_ext_v1
=
271 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
272 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
273 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
275 static const arm_feature_set fpu_vfp_fp16
=
276 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
277 static const arm_feature_set fpu_neon_ext_fma
=
278 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
280 static const arm_feature_set fpu_vfp_ext_fma
=
281 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
282 static const arm_feature_set fpu_vfp_ext_armv8
=
283 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
284 static const arm_feature_set fpu_vfp_ext_armv8xd
=
285 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
286 static const arm_feature_set fpu_neon_ext_armv8
=
287 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
288 static const arm_feature_set fpu_crypto_ext_armv8
=
289 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
290 static const arm_feature_set crc_ext_armv8
=
291 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
292 static const arm_feature_set fpu_neon_ext_v8_1
=
293 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
295 static int mfloat_abi_opt
= -1;
296 /* Record user cpu selection for object attributes. */
297 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
298 /* Must be long enough to hold any of the names in arm_cpus. */
299 static char selected_cpu_name
[20];
301 extern FLONUM_TYPE generic_floating_point_number
;
303 /* Return if no cpu was selected on command-line. */
305 no_cpu_selected (void)
307 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
312 static int meabi_flags
= EABI_DEFAULT
;
314 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
317 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
322 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
327 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
328 symbolS
* GOT_symbol
;
331 /* 0: assemble for ARM,
332 1: assemble for Thumb,
333 2: assemble for Thumb even though target CPU does not support thumb
335 static int thumb_mode
= 0;
336 /* A value distinct from the possible values for thumb_mode that we
337 can use to record whether thumb_mode has been copied into the
338 tc_frag_data field of a frag. */
339 #define MODE_RECORDED (1 << 4)
341 /* Specifies the intrinsic IT insn behavior mode. */
342 enum implicit_it_mode
344 IMPLICIT_IT_MODE_NEVER
= 0x00,
345 IMPLICIT_IT_MODE_ARM
= 0x01,
346 IMPLICIT_IT_MODE_THUMB
= 0x02,
347 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
349 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
351 /* If unified_syntax is true, we are processing the new unified
352 ARM/Thumb syntax. Important differences from the old ARM mode:
354 - Immediate operands do not require a # prefix.
355 - Conditional affixes always appear at the end of the
356 instruction. (For backward compatibility, those instructions
357 that formerly had them in the middle, continue to accept them
359 - The IT instruction may appear, and if it does is validated
360 against subsequent conditional affixes. It does not generate
363 Important differences from the old Thumb mode:
365 - Immediate operands do not require a # prefix.
366 - Most of the V6T2 instructions are only available in unified mode.
367 - The .N and .W suffixes are recognized and honored (it is an error
368 if they cannot be honored).
369 - All instructions set the flags if and only if they have an 's' affix.
370 - Conditional affixes may be used. They are validated against
371 preceding IT instructions. Unlike ARM mode, you cannot use a
372 conditional affix except in the scope of an IT instruction. */
374 static bfd_boolean unified_syntax
= FALSE
;
376 /* An immediate operand can start with #, and ld*, st*, pld operands
377 can contain [ and ]. We need to tell APP not to elide whitespace
378 before a [, which can appear as the first operand for pld.
379 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
380 const char arm_symbol_chars
[] = "#[]{}";
395 enum neon_el_type type
;
399 #define NEON_MAX_TYPE_ELS 4
403 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
407 enum it_instruction_type
412 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
413 if inside, should be the last one. */
414 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
415 i.e. BKPT and NOP. */
416 IT_INSN
/* The IT insn has been parsed. */
419 /* The maximum number of operands we need. */
420 #define ARM_IT_MAX_OPERANDS 6
425 unsigned long instruction
;
429 /* "uncond_value" is set to the value in place of the conditional field in
430 unconditional versions of the instruction, or -1 if nothing is
433 struct neon_type vectype
;
434 /* This does not indicate an actual NEON instruction, only that
435 the mnemonic accepts neon-style type suffixes. */
437 /* Set to the opcode if the instruction needs relaxation.
438 Zero if the instruction is not relaxed. */
442 bfd_reloc_code_real_type type
;
447 enum it_instruction_type it_insn_type
;
453 struct neon_type_el vectype
;
454 unsigned present
: 1; /* Operand present. */
455 unsigned isreg
: 1; /* Operand was a register. */
456 unsigned immisreg
: 1; /* .imm field is a second register. */
457 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
458 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
459 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
460 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
461 instructions. This allows us to disambiguate ARM <-> vector insns. */
462 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
463 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
464 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
465 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
466 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
467 unsigned writeback
: 1; /* Operand has trailing ! */
468 unsigned preind
: 1; /* Preindexed address. */
469 unsigned postind
: 1; /* Postindexed address. */
470 unsigned negative
: 1; /* Index register was negated. */
471 unsigned shifted
: 1; /* Shift applied to operation. */
472 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
473 } operands
[ARM_IT_MAX_OPERANDS
];
476 static struct arm_it inst
;
478 #define NUM_FLOAT_VALS 8
480 const char * fp_const
[] =
482 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
485 /* Number of littlenums required to hold an extended precision number. */
486 #define MAX_LITTLENUMS 6
488 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
498 #define CP_T_X 0x00008000
499 #define CP_T_Y 0x00400000
501 #define CONDS_BIT 0x00100000
502 #define LOAD_BIT 0x00100000
504 #define DOUBLE_LOAD_FLAG 0x00000001
508 const char * template_name
;
512 #define COND_ALWAYS 0xE
516 const char * template_name
;
520 struct asm_barrier_opt
522 const char * template_name
;
524 const arm_feature_set arch
;
527 /* The bit that distinguishes CPSR and SPSR. */
528 #define SPSR_BIT (1 << 22)
530 /* The individual PSR flag bits. */
531 #define PSR_c (1 << 16)
532 #define PSR_x (1 << 17)
533 #define PSR_s (1 << 18)
534 #define PSR_f (1 << 19)
539 bfd_reloc_code_real_type reloc
;
544 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
545 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
550 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
553 /* Bits for DEFINED field in neon_typed_alias. */
554 #define NTA_HASTYPE 1
555 #define NTA_HASINDEX 2
557 struct neon_typed_alias
559 unsigned char defined
;
561 struct neon_type_el eltype
;
564 /* ARM register categories. This includes coprocessor numbers and various
565 architecture extensions' registers. */
592 /* Structure for a hash table entry for a register.
593 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
594 information which states whether a vector type or index is specified (for a
595 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
601 unsigned char builtin
;
602 struct neon_typed_alias
* neon
;
605 /* Diagnostics used when we don't get a register of the expected type. */
606 const char * const reg_expected_msgs
[] =
608 N_("ARM register expected"),
609 N_("bad or missing co-processor number"),
610 N_("co-processor register expected"),
611 N_("FPA register expected"),
612 N_("VFP single precision register expected"),
613 N_("VFP/Neon double precision register expected"),
614 N_("Neon quad precision register expected"),
615 N_("VFP single or double precision register expected"),
616 N_("Neon double or quad precision register expected"),
617 N_("VFP single, double or Neon quad precision register expected"),
618 N_("VFP system register expected"),
619 N_("Maverick MVF register expected"),
620 N_("Maverick MVD register expected"),
621 N_("Maverick MVFX register expected"),
622 N_("Maverick MVDX register expected"),
623 N_("Maverick MVAX register expected"),
624 N_("Maverick DSPSC register expected"),
625 N_("iWMMXt data register expected"),
626 N_("iWMMXt control register expected"),
627 N_("iWMMXt scalar register expected"),
628 N_("XScale accumulator register expected"),
631 /* Some well known registers that we refer to directly elsewhere. */
637 /* ARM instructions take 4bytes in the object file, Thumb instructions
643 /* Basic string to match. */
644 const char * template_name
;
646 /* Parameters to instruction. */
647 unsigned int operands
[8];
649 /* Conditional tag - see opcode_lookup. */
650 unsigned int tag
: 4;
652 /* Basic instruction code. */
653 unsigned int avalue
: 28;
655 /* Thumb-format instruction code. */
658 /* Which architecture variant provides this instruction. */
659 const arm_feature_set
* avariant
;
660 const arm_feature_set
* tvariant
;
662 /* Function to call to encode instruction in ARM format. */
663 void (* aencode
) (void);
665 /* Function to call to encode instruction in Thumb format. */
666 void (* tencode
) (void);
669 /* Defines for various bits that we will want to toggle. */
670 #define INST_IMMEDIATE 0x02000000
671 #define OFFSET_REG 0x02000000
672 #define HWOFFSET_IMM 0x00400000
673 #define SHIFT_BY_REG 0x00000010
674 #define PRE_INDEX 0x01000000
675 #define INDEX_UP 0x00800000
676 #define WRITE_BACK 0x00200000
677 #define LDM_TYPE_2_OR_3 0x00400000
678 #define CPSI_MMOD 0x00020000
680 #define LITERAL_MASK 0xf000f000
681 #define OPCODE_MASK 0xfe1fffff
682 #define V4_STR_BIT 0x00000020
683 #define VLDR_VMOV_SAME 0x0040f000
685 #define T2_SUBS_PC_LR 0xf3de8f00
687 #define DATA_OP_SHIFT 21
688 #define SBIT_SHIFT 20
690 #define T2_OPCODE_MASK 0xfe1fffff
691 #define T2_DATA_OP_SHIFT 21
692 #define T2_SBIT_SHIFT 20
694 #define A_COND_MASK 0xf0000000
695 #define A_PUSH_POP_OP_MASK 0x0fff0000
697 /* Opcodes for pushing/popping registers to/from the stack. */
698 #define A1_OPCODE_PUSH 0x092d0000
699 #define A2_OPCODE_PUSH 0x052d0004
700 #define A2_OPCODE_POP 0x049d0004
702 /* Codes to distinguish the arithmetic instructions. */
713 #define OPCODE_CMP 10
714 #define OPCODE_CMN 11
715 #define OPCODE_ORR 12
716 #define OPCODE_MOV 13
717 #define OPCODE_BIC 14
718 #define OPCODE_MVN 15
720 #define T2_OPCODE_AND 0
721 #define T2_OPCODE_BIC 1
722 #define T2_OPCODE_ORR 2
723 #define T2_OPCODE_ORN 3
724 #define T2_OPCODE_EOR 4
725 #define T2_OPCODE_ADD 8
726 #define T2_OPCODE_ADC 10
727 #define T2_OPCODE_SBC 11
728 #define T2_OPCODE_SUB 13
729 #define T2_OPCODE_RSB 14
731 #define T_OPCODE_MUL 0x4340
732 #define T_OPCODE_TST 0x4200
733 #define T_OPCODE_CMN 0x42c0
734 #define T_OPCODE_NEG 0x4240
735 #define T_OPCODE_MVN 0x43c0
737 #define T_OPCODE_ADD_R3 0x1800
738 #define T_OPCODE_SUB_R3 0x1a00
739 #define T_OPCODE_ADD_HI 0x4400
740 #define T_OPCODE_ADD_ST 0xb000
741 #define T_OPCODE_SUB_ST 0xb080
742 #define T_OPCODE_ADD_SP 0xa800
743 #define T_OPCODE_ADD_PC 0xa000
744 #define T_OPCODE_ADD_I8 0x3000
745 #define T_OPCODE_SUB_I8 0x3800
746 #define T_OPCODE_ADD_I3 0x1c00
747 #define T_OPCODE_SUB_I3 0x1e00
749 #define T_OPCODE_ASR_R 0x4100
750 #define T_OPCODE_LSL_R 0x4080
751 #define T_OPCODE_LSR_R 0x40c0
752 #define T_OPCODE_ROR_R 0x41c0
753 #define T_OPCODE_ASR_I 0x1000
754 #define T_OPCODE_LSL_I 0x0000
755 #define T_OPCODE_LSR_I 0x0800
757 #define T_OPCODE_MOV_I8 0x2000
758 #define T_OPCODE_CMP_I8 0x2800
759 #define T_OPCODE_CMP_LR 0x4280
760 #define T_OPCODE_MOV_HR 0x4600
761 #define T_OPCODE_CMP_HR 0x4500
763 #define T_OPCODE_LDR_PC 0x4800
764 #define T_OPCODE_LDR_SP 0x9800
765 #define T_OPCODE_STR_SP 0x9000
766 #define T_OPCODE_LDR_IW 0x6800
767 #define T_OPCODE_STR_IW 0x6000
768 #define T_OPCODE_LDR_IH 0x8800
769 #define T_OPCODE_STR_IH 0x8000
770 #define T_OPCODE_LDR_IB 0x7800
771 #define T_OPCODE_STR_IB 0x7000
772 #define T_OPCODE_LDR_RW 0x5800
773 #define T_OPCODE_STR_RW 0x5000
774 #define T_OPCODE_LDR_RH 0x5a00
775 #define T_OPCODE_STR_RH 0x5200
776 #define T_OPCODE_LDR_RB 0x5c00
777 #define T_OPCODE_STR_RB 0x5400
779 #define T_OPCODE_PUSH 0xb400
780 #define T_OPCODE_POP 0xbc00
782 #define T_OPCODE_BRANCH 0xe000
784 #define THUMB_SIZE 2 /* Size of thumb instruction. */
785 #define THUMB_PP_PC_LR 0x0100
786 #define THUMB_LOAD_BIT 0x0800
787 #define THUMB2_LOAD_BIT 0x00100000
789 #define BAD_ARGS _("bad arguments to instruction")
790 #define BAD_SP _("r13 not allowed here")
791 #define BAD_PC _("r15 not allowed here")
792 #define BAD_COND _("instruction cannot be conditional")
793 #define BAD_OVERLAP _("registers may not be the same")
794 #define BAD_HIREG _("lo register required")
795 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* Diagnostic for an unsupported addressing mode.  Note: no trailing
   semicolon -- like the other BAD_* macros above, this is used in
   expression context ("inst.error = BAD_ADDR_MODE;"), so a semicolon
   baked into the expansion would create a stray empty statement and
   break brace-less if/else uses.  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
797 #define BAD_BRANCH _("branch must be last instruction in IT block")
798 #define BAD_NOT_IT _("instruction not allowed in IT block")
799 #define BAD_FPU _("selected FPU does not support instruction")
800 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
801 #define BAD_IT_COND _("incorrect condition in IT block")
802 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
803 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
804 #define BAD_PC_ADDRESSING \
805 _("cannot use register index with PC-relative addressing")
806 #define BAD_PC_WRITEBACK \
807 _("cannot use writeback with PC-relative addressing")
808 #define BAD_RANGE _("branch out of range")
809 #define BAD_FP16 _("selected processor does not support fp16 instruction")
810 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
811 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
813 static struct hash_control
* arm_ops_hsh
;
814 static struct hash_control
* arm_cond_hsh
;
815 static struct hash_control
* arm_shift_hsh
;
816 static struct hash_control
* arm_psr_hsh
;
817 static struct hash_control
* arm_v7m_psr_hsh
;
818 static struct hash_control
* arm_reg_hsh
;
819 static struct hash_control
* arm_reloc_hsh
;
820 static struct hash_control
* arm_barrier_opt_hsh
;
822 /* Stuff needed to resolve the label ambiguity
831 symbolS
* last_label_seen
;
832 static int label_is_thumb_function_name
= FALSE
;
834 /* Literal pool structure. Held on a per-section
835 and per-sub-section basis. */
837 #define MAX_LITERAL_POOL_SIZE 1024
838 typedef struct literal_pool
840 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
841 unsigned int next_free_entry
;
847 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
849 struct literal_pool
* next
;
850 unsigned int alignment
;
853 /* Pointer to a linked list of literal pools. */
854 literal_pool
* list_of_pools
= NULL
;
856 typedef enum asmfunc_states
859 WAITING_ASMFUNC_NAME
,
863 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
866 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
868 static struct current_it now_it
;
872 now_it_compatible (int cond
)
874 return (cond
& ~1) == (now_it
.cc
& ~1);
878 conditional_insn (void)
880 return inst
.cond
!= COND_ALWAYS
;
883 static int in_it_block (void);
885 static int handle_it_state (void);
887 static void force_automatic_it_block_close (void);
889 static void it_fsm_post_encode (void);
891 #define set_it_insn_type(type) \
894 inst.it_insn_type = type; \
895 if (handle_it_state () == FAIL) \
900 #define set_it_insn_type_nonvoid(type, failret) \
903 inst.it_insn_type = type; \
904 if (handle_it_state () == FAIL) \
909 #define set_it_insn_type_last() \
912 if (inst.cond == COND_ALWAYS) \
913 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
915 set_it_insn_type (INSIDE_IT_LAST_INSN); \
921 /* This array holds the chars that always start a comment. If the
922 pre-processor is disabled, these aren't very useful. */
923 char arm_comment_chars
[] = "@";
925 /* This array holds the chars that only start a comment at the beginning of
926 a line. If the line seems to have the form '# 123 filename'
927 .line and .file directives will appear in the pre-processed output. */
928 /* Note that input_file.c hand checks for '#' at the beginning of the
929 first line of the input file. This is because the compiler outputs
930 #NO_APP at the beginning of its output. */
931 /* Also note that comments like this one will always work. */
932 const char line_comment_chars
[] = "#";
934 char arm_line_separator_chars
[] = ";";
936 /* Chars that can be used to separate mant
937 from exp in floating point numbers. */
938 const char EXP_CHARS
[] = "eE";
940 /* Chars that mean this number is a floating point constant. */
944 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
946 /* Prefix characters that indicate the start of an immediate
948 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
950 /* Separator character handling. */
952 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
955 skip_past_char (char ** str
, char c
)
957 /* PR gas/14987: Allow for whitespace before the expected character. */
958 skip_whitespace (*str
);
969 #define skip_past_comma(str) skip_past_char (str, ',')
971 /* Arithmetic expressions (possibly involving symbols). */
973 /* Return TRUE if anything in the expression is a bignum. */
976 walk_no_bignums (symbolS
* sp
)
978 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
981 if (symbol_get_value_expression (sp
)->X_add_symbol
)
983 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
984 || (symbol_get_value_expression (sp
)->X_op_symbol
985 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
991 static int in_my_get_expression
= 0;
993 /* Third argument to my_get_expression. */
994 #define GE_NO_PREFIX 0
995 #define GE_IMM_PREFIX 1
996 #define GE_OPT_PREFIX 2
997 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
998 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
999 #define GE_OPT_PREFIX_BIG 3
1002 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1007 /* In unified syntax, all prefixes are optional. */
1009 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1012 switch (prefix_mode
)
1014 case GE_NO_PREFIX
: break;
1016 if (!is_immediate_prefix (**str
))
1018 inst
.error
= _("immediate expression requires a # prefix");
1024 case GE_OPT_PREFIX_BIG
:
1025 if (is_immediate_prefix (**str
))
1031 memset (ep
, 0, sizeof (expressionS
));
1033 save_in
= input_line_pointer
;
1034 input_line_pointer
= *str
;
1035 in_my_get_expression
= 1;
1036 seg
= expression (ep
);
1037 in_my_get_expression
= 0;
1039 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1041 /* We found a bad or missing expression in md_operand(). */
1042 *str
= input_line_pointer
;
1043 input_line_pointer
= save_in
;
1044 if (inst
.error
== NULL
)
1045 inst
.error
= (ep
->X_op
== O_absent
1046 ? _("missing expression") :_("bad expression"));
1051 if (seg
!= absolute_section
1052 && seg
!= text_section
1053 && seg
!= data_section
1054 && seg
!= bss_section
1055 && seg
!= undefined_section
)
1057 inst
.error
= _("bad segment");
1058 *str
= input_line_pointer
;
1059 input_line_pointer
= save_in
;
1066 /* Get rid of any bignums now, so that we don't generate an error for which
1067 we can't establish a line number later on. Big numbers are never valid
1068 in instructions, which is where this routine is always called. */
1069 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1070 && (ep
->X_op
== O_big
1071 || (ep
->X_add_symbol
1072 && (walk_no_bignums (ep
->X_add_symbol
)
1074 && walk_no_bignums (ep
->X_op_symbol
))))))
1076 inst
.error
= _("invalid constant");
1077 *str
= input_line_pointer
;
1078 input_line_pointer
= save_in
;
1082 *str
= input_line_pointer
;
1083 input_line_pointer
= save_in
;
1087 /* Turn a string in input_line_pointer into a floating point constant
1088 of type TYPE, and store the appropriate bytes in *LITP. The number
1089 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1090 returned, or NULL on OK.
1092 Note that fp constants aren't represented in the normal way on the ARM.
1093 In big endian mode, things are as expected. However, in little endian
1094 mode fp constants are big-endian word-wise, and little-endian byte-wise
1095 within the words. For example, (double) 1.1 in big endian mode is
1096 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1097 the byte sequence 99 99 f1 3f 9a 99 99 99.
1099 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1102 md_atof (int type
, char * litP
, int * sizeP
)
1105 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1137 return _("Unrecognized or unsupported floating point constant");
1140 t
= atof_ieee (input_line_pointer
, type
, words
);
1142 input_line_pointer
= t
;
1143 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1145 if (target_big_endian
)
1147 for (i
= 0; i
< prec
; i
++)
1149 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1150 litP
+= sizeof (LITTLENUM_TYPE
);
1155 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1156 for (i
= prec
- 1; i
>= 0; i
--)
1158 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1159 litP
+= sizeof (LITTLENUM_TYPE
);
1162 /* For a 4 byte float the order of elements in `words' is 1 0.
1163 For an 8 byte float the order is 1 0 3 2. */
1164 for (i
= 0; i
< prec
; i
+= 2)
1166 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1167 sizeof (LITTLENUM_TYPE
));
1168 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1169 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1170 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1177 /* We handle all bad expressions here, so that we can report the faulty
1178 instruction in the error message. */
1180 md_operand (expressionS
* exp
)
1182 if (in_my_get_expression
)
1183 exp
->X_op
= O_illegal
;
1186 /* Immediate values. */
1188 /* Generic immediate-value read function for use in directives.
1189 Accepts anything that 'expression' can fold to a constant.
1190 *val receives the number. */
1193 immediate_for_directive (int *val
)
1196 exp
.X_op
= O_illegal
;
1198 if (is_immediate_prefix (*input_line_pointer
))
1200 input_line_pointer
++;
1204 if (exp
.X_op
!= O_constant
)
1206 as_bad (_("expected #constant"));
1207 ignore_rest_of_line ();
1210 *val
= exp
.X_add_number
;
1215 /* Register parsing. */
1217 /* Generic register parser. CCP points to what should be the
1218 beginning of a register name. If it is indeed a valid register
1219 name, advance CCP over it and return the reg_entry structure;
1220 otherwise return NULL. Does not issue diagnostics. */
1222 static struct reg_entry
*
1223 arm_reg_parse_multi (char **ccp
)
1227 struct reg_entry
*reg
;
1229 skip_whitespace (start
);
1231 #ifdef REGISTER_PREFIX
1232 if (*start
!= REGISTER_PREFIX
)
1236 #ifdef OPTIONAL_REGISTER_PREFIX
1237 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1242 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1247 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1249 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1259 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1260 enum arm_reg_type type
)
1262 /* Alternative syntaxes are accepted for a few register classes. */
1269 /* Generic coprocessor register names are allowed for these. */
1270 if (reg
&& reg
->type
== REG_TYPE_CN
)
1275 /* For backward compatibility, a bare number is valid here. */
1277 unsigned long processor
= strtoul (start
, ccp
, 10);
1278 if (*ccp
!= start
&& processor
<= 15)
1283 case REG_TYPE_MMXWC
:
1284 /* WC includes WCG. ??? I'm not sure this is true for all
1285 instructions that take WC registers. */
1286 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1297 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1298 return value is the register number or FAIL. */
1301 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1304 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1307 /* Do not allow a scalar (reg+index) to parse as a register. */
1308 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1311 if (reg
&& reg
->type
== type
)
1314 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1321 /* Parse a Neon type specifier. *STR should point at the leading '.'
1322 character. Does no verification at this stage that the type fits the opcode
1329 Can all be legally parsed by this function.
1331 Fills in neon_type struct pointer with parsed information, and updates STR
1332 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1333 type, FAIL if not. */
1336 parse_neon_type (struct neon_type
*type
, char **str
)
1343 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1345 enum neon_el_type thistype
= NT_untyped
;
1346 unsigned thissize
= -1u;
1353 /* Just a size without an explicit type. */
1357 switch (TOLOWER (*ptr
))
1359 case 'i': thistype
= NT_integer
; break;
1360 case 'f': thistype
= NT_float
; break;
1361 case 'p': thistype
= NT_poly
; break;
1362 case 's': thistype
= NT_signed
; break;
1363 case 'u': thistype
= NT_unsigned
; break;
1365 thistype
= NT_float
;
1370 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1376 /* .f is an abbreviation for .f32. */
1377 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1382 thissize
= strtoul (ptr
, &ptr
, 10);
1384 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1387 as_bad (_("bad size %d in type specifier"), thissize
);
1395 type
->el
[type
->elems
].type
= thistype
;
1396 type
->el
[type
->elems
].size
= thissize
;
1401 /* Empty/missing type is not a successful parse. */
1402 if (type
->elems
== 0)
1410 /* Errors may be set multiple times during parsing or bit encoding
1411 (particularly in the Neon bits), but usually the earliest error which is set
1412 will be the most meaningful. Avoid overwriting it with later (cascading)
1413 errors by calling this function. */
1416 first_error (const char *err
)
1422 /* Parse a single type, e.g. ".s32", leading period included. */
1424 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1427 struct neon_type optype
;
1431 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1433 if (optype
.elems
== 1)
1434 *vectype
= optype
.el
[0];
1437 first_error (_("only one type should be specified for operand"));
1443 first_error (_("vector type expected"));
1455 /* Special meanings for indices (which have a range of 0-7), which will fit into
1458 #define NEON_ALL_LANES 15
1459 #define NEON_INTERLEAVE_LANES 14
1461 /* Parse either a register or a scalar, with an optional type. Return the
1462 register number, and optionally fill in the actual type of the register
1463 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1464 type/index information in *TYPEINFO. */
1467 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1468 enum arm_reg_type
*rtype
,
1469 struct neon_typed_alias
*typeinfo
)
1472 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1473 struct neon_typed_alias atype
;
1474 struct neon_type_el parsetype
;
1478 atype
.eltype
.type
= NT_invtype
;
1479 atype
.eltype
.size
= -1;
1481 /* Try alternate syntax for some types of register. Note these are mutually
1482 exclusive with the Neon syntax extensions. */
1485 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1493 /* Undo polymorphism when a set of register types may be accepted. */
1494 if ((type
== REG_TYPE_NDQ
1495 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1496 || (type
== REG_TYPE_VFSD
1497 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1498 || (type
== REG_TYPE_NSDQ
1499 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1500 || reg
->type
== REG_TYPE_NQ
))
1501 || (type
== REG_TYPE_MMXWC
1502 && (reg
->type
== REG_TYPE_MMXWCG
)))
1503 type
= (enum arm_reg_type
) reg
->type
;
1505 if (type
!= reg
->type
)
1511 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1513 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1515 first_error (_("can't redefine type for operand"));
1518 atype
.defined
|= NTA_HASTYPE
;
1519 atype
.eltype
= parsetype
;
1522 if (skip_past_char (&str
, '[') == SUCCESS
)
1524 if (type
!= REG_TYPE_VFD
)
1526 first_error (_("only D registers may be indexed"));
1530 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1532 first_error (_("can't change index for operand"));
1536 atype
.defined
|= NTA_HASINDEX
;
1538 if (skip_past_char (&str
, ']') == SUCCESS
)
1539 atype
.index
= NEON_ALL_LANES
;
1544 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1546 if (exp
.X_op
!= O_constant
)
1548 first_error (_("constant expression required"));
1552 if (skip_past_char (&str
, ']') == FAIL
)
1555 atype
.index
= exp
.X_add_number
;
1570 /* Like arm_reg_parse, but allow allow the following extra features:
1571 - If RTYPE is non-zero, return the (possibly restricted) type of the
1572 register (e.g. Neon double or quad reg when either has been requested).
1573 - If this is a Neon vector type with additional type information, fill
1574 in the struct pointed to by VECTYPE (if non-NULL).
1575 This function will fault on encountering a scalar. */
1578 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1579 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1581 struct neon_typed_alias atype
;
1583 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1588 /* Do not allow regname(... to parse as a register. */
1592 /* Do not allow a scalar (reg+index) to parse as a register. */
1593 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1595 first_error (_("register operand expected, but got scalar"));
1600 *vectype
= atype
.eltype
;
1607 #define NEON_SCALAR_REG(X) ((X) >> 4)
1608 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1610 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1611 have enough information to be able to do a good job bounds-checking. So, we
1612 just do easy checks here, and do further checks later. */
1615 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1619 struct neon_typed_alias atype
;
1621 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1623 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1626 if (atype
.index
== NEON_ALL_LANES
)
1628 first_error (_("scalar must have an index"));
1631 else if (atype
.index
>= 64 / elsize
)
1633 first_error (_("scalar index out of range"));
1638 *type
= atype
.eltype
;
1642 return reg
* 16 + atype
.index
;
1645 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1648 parse_reg_list (char ** strp
)
1650 char * str
= * strp
;
1654 /* We come back here if we get ranges concatenated by '+' or '|'. */
1657 skip_whitespace (str
);
1671 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1673 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1683 first_error (_("bad range in register list"));
1687 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1689 if (range
& (1 << i
))
1691 (_("Warning: duplicated register (r%d) in register list"),
1699 if (range
& (1 << reg
))
1700 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1702 else if (reg
<= cur_reg
)
1703 as_tsktsk (_("Warning: register range not in ascending order"));
1708 while (skip_past_comma (&str
) != FAIL
1709 || (in_range
= 1, *str
++ == '-'));
1712 if (skip_past_char (&str
, '}') == FAIL
)
1714 first_error (_("missing `}'"));
1722 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1725 if (exp
.X_op
== O_constant
)
1727 if (exp
.X_add_number
1728 != (exp
.X_add_number
& 0x0000ffff))
1730 inst
.error
= _("invalid register mask");
1734 if ((range
& exp
.X_add_number
) != 0)
1736 int regno
= range
& exp
.X_add_number
;
1739 regno
= (1 << regno
) - 1;
1741 (_("Warning: duplicated register (r%d) in register list"),
1745 range
|= exp
.X_add_number
;
1749 if (inst
.reloc
.type
!= 0)
1751 inst
.error
= _("expression too complex");
1755 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1756 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1757 inst
.reloc
.pc_rel
= 0;
1761 if (*str
== '|' || *str
== '+')
1767 while (another_range
);
1773 /* Types of registers in a list. */
1782 /* Parse a VFP register list. If the string is invalid return FAIL.
1783 Otherwise return the number of registers, and set PBASE to the first
1784 register. Parses registers of type ETYPE.
1785 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1786 - Q registers can be used to specify pairs of D registers
1787 - { } can be omitted from around a singleton register list
1788 FIXME: This is not implemented, as it would require backtracking in
1791 This could be done (the meaning isn't really ambiguous), but doesn't
1792 fit in well with the current parsing framework.
1793 - 32 D registers may be used (also true for VFPv3).
1794 FIXME: Types are ignored in these register lists, which is probably a
1798 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1803 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1807 unsigned long mask
= 0;
1810 if (skip_past_char (&str
, '{') == FAIL
)
1812 inst
.error
= _("expecting {");
1819 regtype
= REG_TYPE_VFS
;
1824 regtype
= REG_TYPE_VFD
;
1827 case REGLIST_NEON_D
:
1828 regtype
= REG_TYPE_NDQ
;
1832 if (etype
!= REGLIST_VFP_S
)
1834 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1835 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1839 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1842 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1849 base_reg
= max_regs
;
1853 int setmask
= 1, addregs
= 1;
1855 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1857 if (new_base
== FAIL
)
1859 first_error (_(reg_expected_msgs
[regtype
]));
1863 if (new_base
>= max_regs
)
1865 first_error (_("register out of range in list"));
1869 /* Note: a value of 2 * n is returned for the register Q<n>. */
1870 if (regtype
== REG_TYPE_NQ
)
1876 if (new_base
< base_reg
)
1877 base_reg
= new_base
;
1879 if (mask
& (setmask
<< new_base
))
1881 first_error (_("invalid register list"));
1885 if ((mask
>> new_base
) != 0 && ! warned
)
1887 as_tsktsk (_("register list not in ascending order"));
1891 mask
|= setmask
<< new_base
;
1894 if (*str
== '-') /* We have the start of a range expression */
1900 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1903 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1907 if (high_range
>= max_regs
)
1909 first_error (_("register out of range in list"));
1913 if (regtype
== REG_TYPE_NQ
)
1914 high_range
= high_range
+ 1;
1916 if (high_range
<= new_base
)
1918 inst
.error
= _("register range not in ascending order");
1922 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1924 if (mask
& (setmask
<< new_base
))
1926 inst
.error
= _("invalid register list");
1930 mask
|= setmask
<< new_base
;
1935 while (skip_past_comma (&str
) != FAIL
);
1939 /* Sanity check -- should have raised a parse error above. */
1940 if (count
== 0 || count
> max_regs
)
1945 /* Final test -- the registers must be consecutive. */
1947 for (i
= 0; i
< count
; i
++)
1949 if ((mask
& (1u << i
)) == 0)
1951 inst
.error
= _("non-contiguous register range");
1961 /* True if two alias types are the same. */
1964 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1972 if (a
->defined
!= b
->defined
)
1975 if ((a
->defined
& NTA_HASTYPE
) != 0
1976 && (a
->eltype
.type
!= b
->eltype
.type
1977 || a
->eltype
.size
!= b
->eltype
.size
))
1980 if ((a
->defined
& NTA_HASINDEX
) != 0
1981 && (a
->index
!= b
->index
))
1987 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1988 The base register is put in *PBASE.
1989 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1991 The register stride (minus one) is put in bit 4 of the return value.
1992 Bits [6:5] encode the list length (minus one).
1993 The type of the list elements is put in *ELTYPE, if non-NULL. */
1995 #define NEON_LANE(X) ((X) & 0xf)
1996 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1997 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2000 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2001 struct neon_type_el
*eltype
)
2008 int leading_brace
= 0;
2009 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2010 const char *const incr_error
= _("register stride must be 1 or 2");
2011 const char *const type_error
= _("mismatched element/structure types in list");
2012 struct neon_typed_alias firsttype
;
2013 firsttype
.defined
= 0;
2014 firsttype
.eltype
.type
= NT_invtype
;
2015 firsttype
.eltype
.size
= -1;
2016 firsttype
.index
= -1;
2018 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2023 struct neon_typed_alias atype
;
2024 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2028 first_error (_(reg_expected_msgs
[rtype
]));
2035 if (rtype
== REG_TYPE_NQ
)
2041 else if (reg_incr
== -1)
2043 reg_incr
= getreg
- base_reg
;
2044 if (reg_incr
< 1 || reg_incr
> 2)
2046 first_error (_(incr_error
));
2050 else if (getreg
!= base_reg
+ reg_incr
* count
)
2052 first_error (_(incr_error
));
2056 if (! neon_alias_types_same (&atype
, &firsttype
))
2058 first_error (_(type_error
));
2062 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2066 struct neon_typed_alias htype
;
2067 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2069 lane
= NEON_INTERLEAVE_LANES
;
2070 else if (lane
!= NEON_INTERLEAVE_LANES
)
2072 first_error (_(type_error
));
2077 else if (reg_incr
!= 1)
2079 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2083 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2086 first_error (_(reg_expected_msgs
[rtype
]));
2089 if (! neon_alias_types_same (&htype
, &firsttype
))
2091 first_error (_(type_error
));
2094 count
+= hireg
+ dregs
- getreg
;
2098 /* If we're using Q registers, we can't use [] or [n] syntax. */
2099 if (rtype
== REG_TYPE_NQ
)
2105 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2109 else if (lane
!= atype
.index
)
2111 first_error (_(type_error
));
2115 else if (lane
== -1)
2116 lane
= NEON_INTERLEAVE_LANES
;
2117 else if (lane
!= NEON_INTERLEAVE_LANES
)
2119 first_error (_(type_error
));
2124 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2126 /* No lane set by [x]. We must be interleaving structures. */
2128 lane
= NEON_INTERLEAVE_LANES
;
2131 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2132 || (count
> 1 && reg_incr
== -1))
2134 first_error (_("error parsing element/structure list"));
2138 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2140 first_error (_("expected }"));
2148 *eltype
= firsttype
.eltype
;
2153 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2156 /* Parse an explicit relocation suffix on an expression. This is
2157 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2158 arm_reloc_hsh contains no entries, so this function can only
2159 succeed if there is no () after the word. Returns -1 on error,
2160 BFD_RELOC_UNUSED if there wasn't any suffix. */
2163 parse_reloc (char **str
)
2165 struct reloc_entry
*r
;
2169 return BFD_RELOC_UNUSED
;
2174 while (*q
&& *q
!= ')' && *q
!= ',')
2179 if ((r
= (struct reloc_entry
*)
2180 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2187 /* Directives: register aliases. */
2189 static struct reg_entry
*
2190 insert_reg_alias (char *str
, unsigned number
, int type
)
2192 struct reg_entry
*new_reg
;
2195 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2197 if (new_reg
->builtin
)
2198 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2200 /* Only warn about a redefinition if it's not defined as the
2202 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2203 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2208 name
= xstrdup (str
);
2209 new_reg
= XNEW (struct reg_entry
);
2211 new_reg
->name
= name
;
2212 new_reg
->number
= number
;
2213 new_reg
->type
= type
;
2214 new_reg
->builtin
= FALSE
;
2215 new_reg
->neon
= NULL
;
2217 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2224 insert_neon_reg_alias (char *str
, int number
, int type
,
2225 struct neon_typed_alias
*atype
)
2227 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2231 first_error (_("attempt to redefine typed alias"));
2237 reg
->neon
= XNEW (struct neon_typed_alias
);
2238 *reg
->neon
= *atype
;
2242 /* Look for the .req directive. This is of the form:
2244 new_register_name .req existing_register_name
2246 If we find one, or if it looks sufficiently like one that we want to
2247 handle any error here, return TRUE. Otherwise return FALSE. */
2250 create_register_alias (char * newname
, char *p
)
2252 struct reg_entry
*old
;
2253 char *oldname
, *nbuf
;
2256 /* The input scrubber ensures that whitespace after the mnemonic is
2257 collapsed to single spaces. */
2259 if (strncmp (oldname
, " .req ", 6) != 0)
2263 if (*oldname
== '\0')
2266 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2269 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2273 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2274 the desired alias name, and p points to its end. If not, then
2275 the desired alias name is in the global original_case_string. */
2276 #ifdef TC_CASE_SENSITIVE
2279 newname
= original_case_string
;
2280 nlen
= strlen (newname
);
2283 nbuf
= xmemdup0 (newname
, nlen
);
2285 /* Create aliases under the new name as stated; an all-lowercase
2286 version of the new name; and an all-uppercase version of the new
2288 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2290 for (p
= nbuf
; *p
; p
++)
2293 if (strncmp (nbuf
, newname
, nlen
))
2295 /* If this attempt to create an additional alias fails, do not bother
2296 trying to create the all-lower case alias. We will fail and issue
2297 a second, duplicate error message. This situation arises when the
2298 programmer does something like:
2301 The second .req creates the "Foo" alias but then fails to create
2302 the artificial FOO alias because it has already been created by the
2304 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2311 for (p
= nbuf
; *p
; p
++)
2314 if (strncmp (nbuf
, newname
, nlen
))
2315 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2322 /* Create a Neon typed/indexed register alias using directives, e.g.:
2327 These typed registers can be used instead of the types specified after the
2328 Neon mnemonic, so long as all operands given have types. Types can also be
2329 specified directly, e.g.:
2330 vadd d0.s32, d1.s32, d2.s32 */
2333 create_neon_reg_alias (char *newname
, char *p
)
2335 enum arm_reg_type basetype
;
2336 struct reg_entry
*basereg
;
2337 struct reg_entry mybasereg
;
2338 struct neon_type ntype
;
2339 struct neon_typed_alias typeinfo
;
2340 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2343 typeinfo
.defined
= 0;
2344 typeinfo
.eltype
.type
= NT_invtype
;
2345 typeinfo
.eltype
.size
= -1;
2346 typeinfo
.index
= -1;
2350 if (strncmp (p
, " .dn ", 5) == 0)
2351 basetype
= REG_TYPE_VFD
;
2352 else if (strncmp (p
, " .qn ", 5) == 0)
2353 basetype
= REG_TYPE_NQ
;
2362 basereg
= arm_reg_parse_multi (&p
);
2364 if (basereg
&& basereg
->type
!= basetype
)
2366 as_bad (_("bad type for register"));
2370 if (basereg
== NULL
)
2373 /* Try parsing as an integer. */
2374 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2375 if (exp
.X_op
!= O_constant
)
2377 as_bad (_("expression must be constant"));
2380 basereg
= &mybasereg
;
2381 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2387 typeinfo
= *basereg
->neon
;
2389 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2391 /* We got a type. */
2392 if (typeinfo
.defined
& NTA_HASTYPE
)
2394 as_bad (_("can't redefine the type of a register alias"));
2398 typeinfo
.defined
|= NTA_HASTYPE
;
2399 if (ntype
.elems
!= 1)
2401 as_bad (_("you must specify a single type only"));
2404 typeinfo
.eltype
= ntype
.el
[0];
2407 if (skip_past_char (&p
, '[') == SUCCESS
)
2410 /* We got a scalar index. */
2412 if (typeinfo
.defined
& NTA_HASINDEX
)
2414 as_bad (_("can't redefine the index of a scalar alias"));
2418 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2420 if (exp
.X_op
!= O_constant
)
2422 as_bad (_("scalar index must be constant"));
2426 typeinfo
.defined
|= NTA_HASINDEX
;
2427 typeinfo
.index
= exp
.X_add_number
;
2429 if (skip_past_char (&p
, ']') == FAIL
)
2431 as_bad (_("expecting ]"));
2436 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2437 the desired alias name, and p points to its end. If not, then
2438 the desired alias name is in the global original_case_string. */
2439 #ifdef TC_CASE_SENSITIVE
2440 namelen
= nameend
- newname
;
2442 newname
= original_case_string
;
2443 namelen
= strlen (newname
);
2446 namebuf
= xmemdup0 (newname
, namelen
);
2448 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2449 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2451 /* Insert name in all uppercase. */
2452 for (p
= namebuf
; *p
; p
++)
2455 if (strncmp (namebuf
, newname
, namelen
))
2456 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2457 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2459 /* Insert name in all lowercase. */
2460 for (p
= namebuf
; *p
; p
++)
2463 if (strncmp (namebuf
, newname
, namelen
))
2464 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2465 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2471 /* Should never be called, as .req goes between the alias and the
2472 register name, not at the beginning of the line. */
2475 s_req (int a ATTRIBUTE_UNUSED
)
2477 as_bad (_("invalid syntax for .req directive"));
2481 s_dn (int a ATTRIBUTE_UNUSED
)
2483 as_bad (_("invalid syntax for .dn directive"));
2487 s_qn (int a ATTRIBUTE_UNUSED
)
2489 as_bad (_("invalid syntax for .qn directive"));
2492 /* The .unreq directive deletes an alias which was previously defined
2493 by .req. For example:
2499 s_unreq (int a ATTRIBUTE_UNUSED
)
2504 name
= input_line_pointer
;
2506 while (*input_line_pointer
!= 0
2507 && *input_line_pointer
!= ' '
2508 && *input_line_pointer
!= '\n')
2509 ++input_line_pointer
;
2511 saved_char
= *input_line_pointer
;
2512 *input_line_pointer
= 0;
2515 as_bad (_("invalid syntax for .unreq directive"));
2518 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2522 as_bad (_("unknown register alias '%s'"), name
);
2523 else if (reg
->builtin
)
2524 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2531 hash_delete (arm_reg_hsh
, name
, FALSE
);
2532 free ((char *) reg
->name
);
2537 /* Also locate the all upper case and all lower case versions.
2538 Do not complain if we cannot find one or the other as it
2539 was probably deleted above. */
2541 nbuf
= strdup (name
);
2542 for (p
= nbuf
; *p
; p
++)
2544 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2547 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2548 free ((char *) reg
->name
);
2554 for (p
= nbuf
; *p
; p
++)
2556 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2559 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2560 free ((char *) reg
->name
);
2570 *input_line_pointer
= saved_char
;
2571 demand_empty_rest_of_line ();
2574 /* Directives: Instruction set selection. */
2577 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2578 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2579 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2580 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2582 /* Create a new mapping symbol for the transition to STATE. */
2585 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2588 const char * symname
;
2595 type
= BSF_NO_FLAGS
;
2599 type
= BSF_NO_FLAGS
;
2603 type
= BSF_NO_FLAGS
;
2609 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2610 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2615 THUMB_SET_FUNC (symbolP
, 0);
2616 ARM_SET_THUMB (symbolP
, 0);
2617 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2621 THUMB_SET_FUNC (symbolP
, 1);
2622 ARM_SET_THUMB (symbolP
, 1);
2623 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2631 /* Save the mapping symbols for future reference. Also check that
2632 we do not place two mapping symbols at the same offset within a
2633 frag. We'll handle overlap between frags in
2634 check_mapping_symbols.
2636 If .fill or other data filling directive generates zero sized data,
2637 the mapping symbol for the following code will have the same value
2638 as the one generated for the data filling directive. In this case,
2639 we replace the old symbol with the new one at the same address. */
2642 if (frag
->tc_frag_data
.first_map
!= NULL
)
2644 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2645 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2647 frag
->tc_frag_data
.first_map
= symbolP
;
2649 if (frag
->tc_frag_data
.last_map
!= NULL
)
2651 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2652 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2653 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2655 frag
->tc_frag_data
.last_map
= symbolP
;
2658 /* We must sometimes convert a region marked as code to data during
2659 code alignment, if an odd number of bytes have to be padded. The
2660 code mapping symbol is pushed to an aligned address. */
2663 insert_data_mapping_symbol (enum mstate state
,
2664 valueT value
, fragS
*frag
, offsetT bytes
)
2666 /* If there was already a mapping symbol, remove it. */
2667 if (frag
->tc_frag_data
.last_map
!= NULL
2668 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2670 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2674 know (frag
->tc_frag_data
.first_map
== symp
);
2675 frag
->tc_frag_data
.first_map
= NULL
;
2677 frag
->tc_frag_data
.last_map
= NULL
;
2678 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2681 make_mapping_symbol (MAP_DATA
, value
, frag
);
2682 make_mapping_symbol (state
, value
+ bytes
, frag
);
2685 static void mapping_state_2 (enum mstate state
, int max_chars
);
2687 /* Set the mapping state to STATE. Only call this when about to
2688 emit some STATE bytes to the file. */
2690 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2692 mapping_state (enum mstate state
)
2694 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2696 if (mapstate
== state
)
2697 /* The mapping symbol has already been emitted.
2698 There is nothing else to do. */
2701 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2703 All ARM instructions require 4-byte alignment.
2704 (Almost) all Thumb instructions require 2-byte alignment.
2706 When emitting instructions into any section, mark the section
2709 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2710 but themselves require 2-byte alignment; this applies to some
2711 PC- relative forms. However, these cases will invovle implicit
2712 literal pool generation or an explicit .align >=2, both of
2713 which will cause the section to me marked with sufficient
2714 alignment. Thus, we don't handle those cases here. */
2715 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2717 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2718 /* This case will be evaluated later. */
2721 mapping_state_2 (state
, 0);
2724 /* Same as mapping_state, but MAX_CHARS bytes have already been
2725 allocated. Put the mapping symbol that far back. */
2728 mapping_state_2 (enum mstate state
, int max_chars
)
2730 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2732 if (!SEG_NORMAL (now_seg
))
2735 if (mapstate
== state
)
2736 /* The mapping symbol has already been emitted.
2737 There is nothing else to do. */
2740 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2741 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2743 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2744 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2747 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2750 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2751 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2755 #define mapping_state(x) ((void)0)
2756 #define mapping_state_2(x, y) ((void)0)
2759 /* Find the real, Thumb encoded start of a Thumb function. */
2763 find_real_start (symbolS
* symbolP
)
2766 const char * name
= S_GET_NAME (symbolP
);
2767 symbolS
* new_target
;
2769 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2770 #define STUB_NAME ".real_start_of"
2775 /* The compiler may generate BL instructions to local labels because
2776 it needs to perform a branch to a far away location. These labels
2777 do not have a corresponding ".real_start_of" label. We check
2778 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2779 the ".real_start_of" convention for nonlocal branches. */
2780 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2783 real_start
= concat (STUB_NAME
, name
, NULL
);
2784 new_target
= symbol_find (real_start
);
2787 if (new_target
== NULL
)
2789 as_warn (_("Failed to find real start of function: %s\n"), name
);
2790 new_target
= symbolP
;
2798 opcode_select (int width
)
2805 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2806 as_bad (_("selected processor does not support THUMB opcodes"));
2809 /* No need to force the alignment, since we will have been
2810 coming from ARM mode, which is word-aligned. */
2811 record_alignment (now_seg
, 1);
2818 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2819 as_bad (_("selected processor does not support ARM opcodes"));
2824 frag_align (2, 0, 0);
2826 record_alignment (now_seg
, 1);
2831 as_bad (_("invalid instruction size selected (%d)"), width
);
2836 s_arm (int ignore ATTRIBUTE_UNUSED
)
2839 demand_empty_rest_of_line ();
2843 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2846 demand_empty_rest_of_line ();
2850 s_code (int unused ATTRIBUTE_UNUSED
)
2854 temp
= get_absolute_expression ();
2859 opcode_select (temp
);
2863 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2868 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2870 /* If we are not already in thumb mode go into it, EVEN if
2871 the target processor does not support thumb instructions.
2872 This is used by gcc/config/arm/lib1funcs.asm for example
2873 to compile interworking support functions even if the
2874 target processor should not support interworking. */
2878 record_alignment (now_seg
, 1);
2881 demand_empty_rest_of_line ();
2885 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2889 /* The following label is the name/address of the start of a Thumb function.
2890 We need to know this for the interworking support. */
2891 label_is_thumb_function_name
= TRUE
;
2894 /* Perform a .set directive, but also mark the alias as
2895 being a thumb function. */
2898 s_thumb_set (int equiv
)
2900 /* XXX the following is a duplicate of the code for s_set() in read.c
2901 We cannot just call that code as we need to get at the symbol that
2908 /* Especial apologies for the random logic:
2909 This just grew, and could be parsed much more simply!
2911 delim
= get_symbol_name (& name
);
2912 end_name
= input_line_pointer
;
2913 (void) restore_line_pointer (delim
);
2915 if (*input_line_pointer
!= ',')
2918 as_bad (_("expected comma after name \"%s\""), name
);
2920 ignore_rest_of_line ();
2924 input_line_pointer
++;
2927 if (name
[0] == '.' && name
[1] == '\0')
2929 /* XXX - this should not happen to .thumb_set. */
2933 if ((symbolP
= symbol_find (name
)) == NULL
2934 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2937 /* When doing symbol listings, play games with dummy fragments living
2938 outside the normal fragment chain to record the file and line info
2940 if (listing
& LISTING_SYMBOLS
)
2942 extern struct list_info_struct
* listing_tail
;
2943 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2945 memset (dummy_frag
, 0, sizeof (fragS
));
2946 dummy_frag
->fr_type
= rs_fill
;
2947 dummy_frag
->line
= listing_tail
;
2948 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2949 dummy_frag
->fr_symbol
= symbolP
;
2953 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2956 /* "set" symbols are local unless otherwise specified. */
2957 SF_SET_LOCAL (symbolP
);
2958 #endif /* OBJ_COFF */
2959 } /* Make a new symbol. */
2961 symbol_table_insert (symbolP
);
2966 && S_IS_DEFINED (symbolP
)
2967 && S_GET_SEGMENT (symbolP
) != reg_section
)
2968 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2970 pseudo_set (symbolP
);
2972 demand_empty_rest_of_line ();
2974 /* XXX Now we come to the Thumb specific bit of code. */
2976 THUMB_SET_FUNC (symbolP
, 1);
2977 ARM_SET_THUMB (symbolP
, 1);
2978 #if defined OBJ_ELF || defined OBJ_COFF
2979 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2983 /* Directives: Mode selection. */
2985 /* .syntax [unified|divided] - choose the new unified syntax
2986 (same for Arm and Thumb encoding, modulo slight differences in what
2987 can be represented) or the old divergent syntax for each mode. */
2989 s_syntax (int unused ATTRIBUTE_UNUSED
)
2993 delim
= get_symbol_name (& name
);
2995 if (!strcasecmp (name
, "unified"))
2996 unified_syntax
= TRUE
;
2997 else if (!strcasecmp (name
, "divided"))
2998 unified_syntax
= FALSE
;
3001 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3004 (void) restore_line_pointer (delim
);
3005 demand_empty_rest_of_line ();
3008 /* Directives: sectioning and alignment. */
3011 s_bss (int ignore ATTRIBUTE_UNUSED
)
3013 /* We don't support putting frags in the BSS segment, we fake it by
3014 marking in_bss, then looking at s_skip for clues. */
3015 subseg_set (bss_section
, 0);
3016 demand_empty_rest_of_line ();
3018 #ifdef md_elf_section_change_hook
3019 md_elf_section_change_hook ();
3024 s_even (int ignore ATTRIBUTE_UNUSED
)
3026 /* Never make frag if expect extra pass. */
3028 frag_align (1, 0, 0);
3030 record_alignment (now_seg
, 1);
3032 demand_empty_rest_of_line ();
3035 /* Directives: CodeComposer Studio. */
3037 /* .ref (for CodeComposer Studio syntax only). */
3039 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3041 if (codecomposer_syntax
)
3042 ignore_rest_of_line ();
3044 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3047 /* If name is not NULL, then it is used for marking the beginning of a
3048 function, whereas if it is NULL then it means the function end. */
3050 asmfunc_debug (const char * name
)
3052 static const char * last_name
= NULL
;
3056 gas_assert (last_name
== NULL
);
3059 if (debug_type
== DEBUG_STABS
)
3060 stabs_generate_asm_func (name
, name
);
3064 gas_assert (last_name
!= NULL
);
3066 if (debug_type
== DEBUG_STABS
)
3067 stabs_generate_asm_endfunc (last_name
, last_name
);
3074 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3076 if (codecomposer_syntax
)
3078 switch (asmfunc_state
)
3080 case OUTSIDE_ASMFUNC
:
3081 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3084 case WAITING_ASMFUNC_NAME
:
3085 as_bad (_(".asmfunc repeated."));
3088 case WAITING_ENDASMFUNC
:
3089 as_bad (_(".asmfunc without function."));
3092 demand_empty_rest_of_line ();
3095 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3099 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3101 if (codecomposer_syntax
)
3103 switch (asmfunc_state
)
3105 case OUTSIDE_ASMFUNC
:
3106 as_bad (_(".endasmfunc without a .asmfunc."));
3109 case WAITING_ASMFUNC_NAME
:
3110 as_bad (_(".endasmfunc without function."));
3113 case WAITING_ENDASMFUNC
:
3114 asmfunc_state
= OUTSIDE_ASMFUNC
;
3115 asmfunc_debug (NULL
);
3118 demand_empty_rest_of_line ();
3121 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3125 s_ccs_def (int name
)
3127 if (codecomposer_syntax
)
3130 as_bad (_(".def pseudo-op only available with -mccs flag."));
3133 /* Directives: Literal pools. */
3135 static literal_pool
*
3136 find_literal_pool (void)
3138 literal_pool
* pool
;
3140 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3142 if (pool
->section
== now_seg
3143 && pool
->sub_section
== now_subseg
)
3150 static literal_pool
*
3151 find_or_make_literal_pool (void)
3153 /* Next literal pool ID number. */
3154 static unsigned int latest_pool_num
= 1;
3155 literal_pool
* pool
;
3157 pool
= find_literal_pool ();
3161 /* Create a new pool. */
3162 pool
= XNEW (literal_pool
);
3166 pool
->next_free_entry
= 0;
3167 pool
->section
= now_seg
;
3168 pool
->sub_section
= now_subseg
;
3169 pool
->next
= list_of_pools
;
3170 pool
->symbol
= NULL
;
3171 pool
->alignment
= 2;
3173 /* Add it to the list. */
3174 list_of_pools
= pool
;
3177 /* New pools, and emptied pools, will have a NULL symbol. */
3178 if (pool
->symbol
== NULL
)
3180 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3181 (valueT
) 0, &zero_address_frag
);
3182 pool
->id
= latest_pool_num
++;
3189 /* Add the literal in the global 'inst'
3190 structure to the relevant literal pool. */
3193 add_to_lit_pool (unsigned int nbytes
)
3195 #define PADDING_SLOT 0x1
3196 #define LIT_ENTRY_SIZE_MASK 0xFF
3197 literal_pool
* pool
;
3198 unsigned int entry
, pool_size
= 0;
3199 bfd_boolean padding_slot_p
= FALSE
;
3205 imm1
= inst
.operands
[1].imm
;
3206 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3207 : inst
.reloc
.exp
.X_unsigned
? 0
3208 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3209 if (target_big_endian
)
3212 imm2
= inst
.operands
[1].imm
;
3216 pool
= find_or_make_literal_pool ();
3218 /* Check if this literal value is already in the pool. */
3219 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3223 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3224 && (inst
.reloc
.exp
.X_op
== O_constant
)
3225 && (pool
->literals
[entry
].X_add_number
3226 == inst
.reloc
.exp
.X_add_number
)
3227 && (pool
->literals
[entry
].X_md
== nbytes
)
3228 && (pool
->literals
[entry
].X_unsigned
3229 == inst
.reloc
.exp
.X_unsigned
))
3232 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3233 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3234 && (pool
->literals
[entry
].X_add_number
3235 == inst
.reloc
.exp
.X_add_number
)
3236 && (pool
->literals
[entry
].X_add_symbol
3237 == inst
.reloc
.exp
.X_add_symbol
)
3238 && (pool
->literals
[entry
].X_op_symbol
3239 == inst
.reloc
.exp
.X_op_symbol
)
3240 && (pool
->literals
[entry
].X_md
== nbytes
))
3243 else if ((nbytes
== 8)
3244 && !(pool_size
& 0x7)
3245 && ((entry
+ 1) != pool
->next_free_entry
)
3246 && (pool
->literals
[entry
].X_op
== O_constant
)
3247 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3248 && (pool
->literals
[entry
].X_unsigned
3249 == inst
.reloc
.exp
.X_unsigned
)
3250 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3251 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3252 && (pool
->literals
[entry
+ 1].X_unsigned
3253 == inst
.reloc
.exp
.X_unsigned
))
3256 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3257 if (padding_slot_p
&& (nbytes
== 4))
3263 /* Do we need to create a new entry? */
3264 if (entry
== pool
->next_free_entry
)
3266 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3268 inst
.error
= _("literal pool overflow");
3274 /* For 8-byte entries, we align to an 8-byte boundary,
3275 and split it into two 4-byte entries, because on 32-bit
3276 host, 8-byte constants are treated as big num, thus
3277 saved in "generic_bignum" which will be overwritten
3278 by later assignments.
3280 We also need to make sure there is enough space for
3283 We also check to make sure the literal operand is a
3285 if (!(inst
.reloc
.exp
.X_op
== O_constant
3286 || inst
.reloc
.exp
.X_op
== O_big
))
3288 inst
.error
= _("invalid type for literal pool");
3291 else if (pool_size
& 0x7)
3293 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3295 inst
.error
= _("literal pool overflow");
3299 pool
->literals
[entry
] = inst
.reloc
.exp
;
3300 pool
->literals
[entry
].X_op
= O_constant
;
3301 pool
->literals
[entry
].X_add_number
= 0;
3302 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3303 pool
->next_free_entry
+= 1;
3306 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3308 inst
.error
= _("literal pool overflow");
3312 pool
->literals
[entry
] = inst
.reloc
.exp
;
3313 pool
->literals
[entry
].X_op
= O_constant
;
3314 pool
->literals
[entry
].X_add_number
= imm1
;
3315 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3316 pool
->literals
[entry
++].X_md
= 4;
3317 pool
->literals
[entry
] = inst
.reloc
.exp
;
3318 pool
->literals
[entry
].X_op
= O_constant
;
3319 pool
->literals
[entry
].X_add_number
= imm2
;
3320 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3321 pool
->literals
[entry
].X_md
= 4;
3322 pool
->alignment
= 3;
3323 pool
->next_free_entry
+= 1;
3327 pool
->literals
[entry
] = inst
.reloc
.exp
;
3328 pool
->literals
[entry
].X_md
= 4;
3332 /* PR ld/12974: Record the location of the first source line to reference
3333 this entry in the literal pool. If it turns out during linking that the
3334 symbol does not exist we will be able to give an accurate line number for
3335 the (first use of the) missing reference. */
3336 if (debug_type
== DEBUG_DWARF2
)
3337 dwarf2_where (pool
->locs
+ entry
);
3339 pool
->next_free_entry
+= 1;
3341 else if (padding_slot_p
)
3343 pool
->literals
[entry
] = inst
.reloc
.exp
;
3344 pool
->literals
[entry
].X_md
= nbytes
;
3347 inst
.reloc
.exp
.X_op
= O_symbol
;
3348 inst
.reloc
.exp
.X_add_number
= pool_size
;
3349 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3355 tc_start_label_without_colon (void)
3357 bfd_boolean ret
= TRUE
;
3359 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3361 const char *label
= input_line_pointer
;
3363 while (!is_end_of_line
[(int) label
[-1]])
3368 as_bad (_("Invalid label '%s'"), label
);
3372 asmfunc_debug (label
);
3374 asmfunc_state
= WAITING_ENDASMFUNC
;
3380 /* Can't use symbol_new here, so have to create a symbol and then at
3381 a later date assign it a value. Thats what these functions do. */
3384 symbol_locate (symbolS
* symbolP
,
3385 const char * name
, /* It is copied, the caller can modify. */
3386 segT segment
, /* Segment identifier (SEG_<something>). */
3387 valueT valu
, /* Symbol value. */
3388 fragS
* frag
) /* Associated fragment. */
3391 char * preserved_copy_of_name
;
3393 name_length
= strlen (name
) + 1; /* +1 for \0. */
3394 obstack_grow (¬es
, name
, name_length
);
3395 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3397 #ifdef tc_canonicalize_symbol_name
3398 preserved_copy_of_name
=
3399 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3402 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3404 S_SET_SEGMENT (symbolP
, segment
);
3405 S_SET_VALUE (symbolP
, valu
);
3406 symbol_clear_list_pointers (symbolP
);
3408 symbol_set_frag (symbolP
, frag
);
3410 /* Link to end of symbol chain. */
3412 extern int symbol_table_frozen
;
3414 if (symbol_table_frozen
)
3418 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3420 obj_symbol_new_hook (symbolP
);
3422 #ifdef tc_symbol_new_hook
3423 tc_symbol_new_hook (symbolP
);
3427 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3428 #endif /* DEBUG_SYMS */
3432 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3435 literal_pool
* pool
;
3438 pool
= find_literal_pool ();
3440 || pool
->symbol
== NULL
3441 || pool
->next_free_entry
== 0)
3444 /* Align pool as you have word accesses.
3445 Only make a frag if we have to. */
3447 frag_align (pool
->alignment
, 0, 0);
3449 record_alignment (now_seg
, 2);
3452 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3453 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3455 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3457 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3458 (valueT
) frag_now_fix (), frag_now
);
3459 symbol_table_insert (pool
->symbol
);
3461 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3463 #if defined OBJ_COFF || defined OBJ_ELF
3464 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3467 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3470 if (debug_type
== DEBUG_DWARF2
)
3471 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3473 /* First output the expression in the instruction to the pool. */
3474 emit_expr (&(pool
->literals
[entry
]),
3475 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3478 /* Mark the pool as empty. */
3479 pool
->next_free_entry
= 0;
3480 pool
->symbol
= NULL
;
3484 /* Forward declarations for functions below, in the MD interface
3486 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3487 static valueT
create_unwind_entry (int);
3488 static void start_unwind_section (const segT
, int);
3489 static void add_unwind_opcode (valueT
, int);
3490 static void flush_pending_unwind (void);
3492 /* Directives: Data. */
3495 s_arm_elf_cons (int nbytes
)
3499 #ifdef md_flush_pending_output
3500 md_flush_pending_output ();
3503 if (is_it_end_of_statement ())
3505 demand_empty_rest_of_line ();
3509 #ifdef md_cons_align
3510 md_cons_align (nbytes
);
3513 mapping_state (MAP_DATA
);
3517 char *base
= input_line_pointer
;
3521 if (exp
.X_op
!= O_symbol
)
3522 emit_expr (&exp
, (unsigned int) nbytes
);
3525 char *before_reloc
= input_line_pointer
;
3526 reloc
= parse_reloc (&input_line_pointer
);
3529 as_bad (_("unrecognized relocation suffix"));
3530 ignore_rest_of_line ();
3533 else if (reloc
== BFD_RELOC_UNUSED
)
3534 emit_expr (&exp
, (unsigned int) nbytes
);
3537 reloc_howto_type
*howto
= (reloc_howto_type
*)
3538 bfd_reloc_type_lookup (stdoutput
,
3539 (bfd_reloc_code_real_type
) reloc
);
3540 int size
= bfd_get_reloc_size (howto
);
3542 if (reloc
== BFD_RELOC_ARM_PLT32
)
3544 as_bad (_("(plt) is only valid on branch targets"));
3545 reloc
= BFD_RELOC_UNUSED
;
3550 as_bad (_("%s relocations do not fit in %d bytes"),
3551 howto
->name
, nbytes
);
3554 /* We've parsed an expression stopping at O_symbol.
3555 But there may be more expression left now that we
3556 have parsed the relocation marker. Parse it again.
3557 XXX Surely there is a cleaner way to do this. */
3558 char *p
= input_line_pointer
;
3560 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3562 memcpy (save_buf
, base
, input_line_pointer
- base
);
3563 memmove (base
+ (input_line_pointer
- before_reloc
),
3564 base
, before_reloc
- base
);
3566 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3568 memcpy (base
, save_buf
, p
- base
);
3570 offset
= nbytes
- size
;
3571 p
= frag_more (nbytes
);
3572 memset (p
, 0, nbytes
);
3573 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3574 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3580 while (*input_line_pointer
++ == ',');
3582 /* Put terminator back into stream. */
3583 input_line_pointer
--;
3584 demand_empty_rest_of_line ();
3587 /* Emit an expression containing a 32-bit thumb instruction.
3588 Implementation based on put_thumb32_insn. */
3591 emit_thumb32_expr (expressionS
* exp
)
3593 expressionS exp_high
= *exp
;
3595 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3596 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3597 exp
->X_add_number
&= 0xffff;
3598 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
/* Guess the instruction size based on the opcode.
   Returns 2 for a 16-bit Thumb encoding, 4 for a 32-bit Thumb-2
   encoding, and 0 when the size cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3615 emit_insn (expressionS
*exp
, int nbytes
)
3619 if (exp
->X_op
== O_constant
)
3624 size
= thumb_insn_size (exp
->X_add_number
);
3628 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3630 as_bad (_(".inst.n operand too big. "\
3631 "Use .inst.w instead"));
3636 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3637 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3639 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3641 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3642 emit_thumb32_expr (exp
);
3644 emit_expr (exp
, (unsigned int) size
);
3646 it_fsm_post_encode ();
3650 as_bad (_("cannot determine Thumb instruction size. " \
3651 "Use .inst.n/.inst.w instead"));
3654 as_bad (_("constant expression required"));
3659 /* Like s_arm_elf_cons but do not use md_cons_align and
3660 set the mapping state to MAP_ARM/MAP_THUMB. */
3663 s_arm_elf_inst (int nbytes
)
3665 if (is_it_end_of_statement ())
3667 demand_empty_rest_of_line ();
3671 /* Calling mapping_state () here will not change ARM/THUMB,
3672 but will ensure not to be in DATA state. */
3675 mapping_state (MAP_THUMB
);
3680 as_bad (_("width suffixes are invalid in ARM mode"));
3681 ignore_rest_of_line ();
3687 mapping_state (MAP_ARM
);
3696 if (! emit_insn (& exp
, nbytes
))
3698 ignore_rest_of_line ();
3702 while (*input_line_pointer
++ == ',');
3704 /* Put terminator back into stream. */
3705 input_line_pointer
--;
3706 demand_empty_rest_of_line ();
3709 /* Parse a .rel31 directive. */
3712 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3719 if (*input_line_pointer
== '1')
3720 highbit
= 0x80000000;
3721 else if (*input_line_pointer
!= '0')
3722 as_bad (_("expected 0 or 1"));
3724 input_line_pointer
++;
3725 if (*input_line_pointer
!= ',')
3726 as_bad (_("missing comma"));
3727 input_line_pointer
++;
3729 #ifdef md_flush_pending_output
3730 md_flush_pending_output ();
3733 #ifdef md_cons_align
3737 mapping_state (MAP_DATA
);
3742 md_number_to_chars (p
, highbit
, 4);
3743 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3744 BFD_RELOC_ARM_PREL31
);
3746 demand_empty_rest_of_line ();
3749 /* Directives: AEABI stack-unwind tables. */
3751 /* Parse an unwind_fnstart directive. Simply records the current location. */
3754 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3756 demand_empty_rest_of_line ();
3757 if (unwind
.proc_start
)
3759 as_bad (_("duplicate .fnstart directive"));
3763 /* Mark the start of the function. */
3764 unwind
.proc_start
= expr_build_dot ();
3766 /* Reset the rest of the unwind info. */
3767 unwind
.opcode_count
= 0;
3768 unwind
.table_entry
= NULL
;
3769 unwind
.personality_routine
= NULL
;
3770 unwind
.personality_index
= -1;
3771 unwind
.frame_size
= 0;
3772 unwind
.fp_offset
= 0;
3773 unwind
.fp_reg
= REG_SP
;
3775 unwind
.sp_restored
= 0;
3779 /* Parse a handlerdata directive. Creates the exception handling table entry
3780 for the function. */
3783 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3785 demand_empty_rest_of_line ();
3786 if (!unwind
.proc_start
)
3787 as_bad (MISSING_FNSTART
);
3789 if (unwind
.table_entry
)
3790 as_bad (_("duplicate .handlerdata directive"));
3792 create_unwind_entry (1);
3795 /* Parse an unwind_fnend directive. Generates the index table entry. */
3798 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3803 unsigned int marked_pr_dependency
;
3805 demand_empty_rest_of_line ();
3807 if (!unwind
.proc_start
)
3809 as_bad (_(".fnend directive without .fnstart"));
3813 /* Add eh table entry. */
3814 if (unwind
.table_entry
== NULL
)
3815 val
= create_unwind_entry (0);
3819 /* Add index table entry. This is two words. */
3820 start_unwind_section (unwind
.saved_seg
, 1);
3821 frag_align (2, 0, 0);
3822 record_alignment (now_seg
, 2);
3824 ptr
= frag_more (8);
3826 where
= frag_now_fix () - 8;
3828 /* Self relative offset of the function start. */
3829 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3830 BFD_RELOC_ARM_PREL31
);
3832 /* Indicate dependency on EHABI-defined personality routines to the
3833 linker, if it hasn't been done already. */
3834 marked_pr_dependency
3835 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3836 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3837 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3839 static const char *const name
[] =
3841 "__aeabi_unwind_cpp_pr0",
3842 "__aeabi_unwind_cpp_pr1",
3843 "__aeabi_unwind_cpp_pr2"
3845 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3846 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3847 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3848 |= 1 << unwind
.personality_index
;
3852 /* Inline exception table entry. */
3853 md_number_to_chars (ptr
+ 4, val
, 4);
3855 /* Self relative offset of the table entry. */
3856 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3857 BFD_RELOC_ARM_PREL31
);
3859 /* Restore the original section. */
3860 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3862 unwind
.proc_start
= NULL
;
3866 /* Parse an unwind_cantunwind directive. */
3869 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3871 demand_empty_rest_of_line ();
3872 if (!unwind
.proc_start
)
3873 as_bad (MISSING_FNSTART
);
3875 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3876 as_bad (_("personality routine specified for cantunwind frame"));
3878 unwind
.personality_index
= -2;
3882 /* Parse a personalityindex directive. */
3885 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3889 if (!unwind
.proc_start
)
3890 as_bad (MISSING_FNSTART
);
3892 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3893 as_bad (_("duplicate .personalityindex directive"));
3897 if (exp
.X_op
!= O_constant
3898 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3900 as_bad (_("bad personality routine number"));
3901 ignore_rest_of_line ();
3905 unwind
.personality_index
= exp
.X_add_number
;
3907 demand_empty_rest_of_line ();
3911 /* Parse a personality directive. */
3914 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3918 if (!unwind
.proc_start
)
3919 as_bad (MISSING_FNSTART
);
3921 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3922 as_bad (_("duplicate .personality directive"));
3924 c
= get_symbol_name (& name
);
3925 p
= input_line_pointer
;
3927 ++ input_line_pointer
;
3928 unwind
.personality_routine
= symbol_find_or_make (name
);
3930 demand_empty_rest_of_line ();
3934 /* Parse a directive saving core registers. */
3937 s_arm_unwind_save_core (void)
3943 range
= parse_reg_list (&input_line_pointer
);
3946 as_bad (_("expected register list"));
3947 ignore_rest_of_line ();
3951 demand_empty_rest_of_line ();
3953 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3954 into .unwind_save {..., sp...}. We aren't bothered about the value of
3955 ip because it is clobbered by calls. */
3956 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3957 && (range
& 0x3000) == 0x1000)
3959 unwind
.opcode_count
--;
3960 unwind
.sp_restored
= 0;
3961 range
= (range
| 0x2000) & ~0x1000;
3962 unwind
.pending_offset
= 0;
3968 /* See if we can use the short opcodes. These pop a block of up to 8
3969 registers starting with r4, plus maybe r14. */
3970 for (n
= 0; n
< 8; n
++)
3972 /* Break at the first non-saved register. */
3973 if ((range
& (1 << (n
+ 4))) == 0)
3976 /* See if there are any other bits set. */
3977 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3979 /* Use the long form. */
3980 op
= 0x8000 | ((range
>> 4) & 0xfff);
3981 add_unwind_opcode (op
, 2);
3985 /* Use the short form. */
3987 op
= 0xa8; /* Pop r14. */
3989 op
= 0xa0; /* Do not pop r14. */
3991 add_unwind_opcode (op
, 1);
3998 op
= 0xb100 | (range
& 0xf);
3999 add_unwind_opcode (op
, 2);
4002 /* Record the number of bytes pushed. */
4003 for (n
= 0; n
< 16; n
++)
4005 if (range
& (1 << n
))
4006 unwind
.frame_size
+= 4;
4011 /* Parse a directive saving FPA registers. */
4014 s_arm_unwind_save_fpa (int reg
)
4020 /* Get Number of registers to transfer. */
4021 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4024 exp
.X_op
= O_illegal
;
4026 if (exp
.X_op
!= O_constant
)
4028 as_bad (_("expected , <constant>"));
4029 ignore_rest_of_line ();
4033 num_regs
= exp
.X_add_number
;
4035 if (num_regs
< 1 || num_regs
> 4)
4037 as_bad (_("number of registers must be in the range [1:4]"));
4038 ignore_rest_of_line ();
4042 demand_empty_rest_of_line ();
4047 op
= 0xb4 | (num_regs
- 1);
4048 add_unwind_opcode (op
, 1);
4053 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4054 add_unwind_opcode (op
, 2);
4056 unwind
.frame_size
+= num_regs
* 12;
4060 /* Parse a directive saving VFP registers for ARMv6 and above. */
4063 s_arm_unwind_save_vfp_armv6 (void)
4068 int num_vfpv3_regs
= 0;
4069 int num_regs_below_16
;
4071 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4074 as_bad (_("expected register list"));
4075 ignore_rest_of_line ();
4079 demand_empty_rest_of_line ();
4081 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4082 than FSTMX/FLDMX-style ones). */
4084 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4086 num_vfpv3_regs
= count
;
4087 else if (start
+ count
> 16)
4088 num_vfpv3_regs
= start
+ count
- 16;
4090 if (num_vfpv3_regs
> 0)
4092 int start_offset
= start
> 16 ? start
- 16 : 0;
4093 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4094 add_unwind_opcode (op
, 2);
4097 /* Generate opcode for registers numbered in the range 0 .. 15. */
4098 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4099 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4100 if (num_regs_below_16
> 0)
4102 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4103 add_unwind_opcode (op
, 2);
4106 unwind
.frame_size
+= count
* 8;
4110 /* Parse a directive saving VFP registers for pre-ARMv6. */
4113 s_arm_unwind_save_vfp (void)
4119 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4122 as_bad (_("expected register list"));
4123 ignore_rest_of_line ();
4127 demand_empty_rest_of_line ();
4132 op
= 0xb8 | (count
- 1);
4133 add_unwind_opcode (op
, 1);
4138 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4139 add_unwind_opcode (op
, 2);
4141 unwind
.frame_size
+= count
* 8 + 4;
4145 /* Parse a directive saving iWMMXt data registers. */
4148 s_arm_unwind_save_mmxwr (void)
4156 if (*input_line_pointer
== '{')
4157 input_line_pointer
++;
4161 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4165 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4170 as_tsktsk (_("register list not in ascending order"));
4173 if (*input_line_pointer
== '-')
4175 input_line_pointer
++;
4176 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4179 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4182 else if (reg
>= hi_reg
)
4184 as_bad (_("bad register range"));
4187 for (; reg
< hi_reg
; reg
++)
4191 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4193 skip_past_char (&input_line_pointer
, '}');
4195 demand_empty_rest_of_line ();
4197 /* Generate any deferred opcodes because we're going to be looking at
4199 flush_pending_unwind ();
4201 for (i
= 0; i
< 16; i
++)
4203 if (mask
& (1 << i
))
4204 unwind
.frame_size
+= 8;
4207 /* Attempt to combine with a previous opcode. We do this because gcc
4208 likes to output separate unwind directives for a single block of
4210 if (unwind
.opcode_count
> 0)
4212 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4213 if ((i
& 0xf8) == 0xc0)
4216 /* Only merge if the blocks are contiguous. */
4219 if ((mask
& 0xfe00) == (1 << 9))
4221 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4222 unwind
.opcode_count
--;
4225 else if (i
== 6 && unwind
.opcode_count
>= 2)
4227 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4231 op
= 0xffff << (reg
- 1);
4233 && ((mask
& op
) == (1u << (reg
- 1))))
4235 op
= (1 << (reg
+ i
+ 1)) - 1;
4236 op
&= ~((1 << reg
) - 1);
4238 unwind
.opcode_count
-= 2;
4245 /* We want to generate opcodes in the order the registers have been
4246 saved, ie. descending order. */
4247 for (reg
= 15; reg
>= -1; reg
--)
4249 /* Save registers in blocks. */
4251 || !(mask
& (1 << reg
)))
4253 /* We found an unsaved reg. Generate opcodes to save the
4260 op
= 0xc0 | (hi_reg
- 10);
4261 add_unwind_opcode (op
, 1);
4266 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4267 add_unwind_opcode (op
, 2);
4276 ignore_rest_of_line ();
4280 s_arm_unwind_save_mmxwcg (void)
4287 if (*input_line_pointer
== '{')
4288 input_line_pointer
++;
4290 skip_whitespace (input_line_pointer
);
4294 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4298 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4304 as_tsktsk (_("register list not in ascending order"));
4307 if (*input_line_pointer
== '-')
4309 input_line_pointer
++;
4310 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4313 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4316 else if (reg
>= hi_reg
)
4318 as_bad (_("bad register range"));
4321 for (; reg
< hi_reg
; reg
++)
4325 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4327 skip_past_char (&input_line_pointer
, '}');
4329 demand_empty_rest_of_line ();
4331 /* Generate any deferred opcodes because we're going to be looking at
4333 flush_pending_unwind ();
4335 for (reg
= 0; reg
< 16; reg
++)
4337 if (mask
& (1 << reg
))
4338 unwind
.frame_size
+= 4;
4341 add_unwind_opcode (op
, 2);
4344 ignore_rest_of_line ();
4348 /* Parse an unwind_save directive.
4349 If the argument is non-zero, this is a .vsave directive. */
4352 s_arm_unwind_save (int arch_v6
)
4355 struct reg_entry
*reg
;
4356 bfd_boolean had_brace
= FALSE
;
4358 if (!unwind
.proc_start
)
4359 as_bad (MISSING_FNSTART
);
4361 /* Figure out what sort of save we have. */
4362 peek
= input_line_pointer
;
4370 reg
= arm_reg_parse_multi (&peek
);
4374 as_bad (_("register expected"));
4375 ignore_rest_of_line ();
4384 as_bad (_("FPA .unwind_save does not take a register list"));
4385 ignore_rest_of_line ();
4388 input_line_pointer
= peek
;
4389 s_arm_unwind_save_fpa (reg
->number
);
4393 s_arm_unwind_save_core ();
4398 s_arm_unwind_save_vfp_armv6 ();
4400 s_arm_unwind_save_vfp ();
4403 case REG_TYPE_MMXWR
:
4404 s_arm_unwind_save_mmxwr ();
4407 case REG_TYPE_MMXWCG
:
4408 s_arm_unwind_save_mmxwcg ();
4412 as_bad (_(".unwind_save does not support this kind of register"));
4413 ignore_rest_of_line ();
4418 /* Parse an unwind_movsp directive. */
4421 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4427 if (!unwind
.proc_start
)
4428 as_bad (MISSING_FNSTART
);
4430 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4433 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4434 ignore_rest_of_line ();
4438 /* Optional constant. */
4439 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4441 if (immediate_for_directive (&offset
) == FAIL
)
4447 demand_empty_rest_of_line ();
4449 if (reg
== REG_SP
|| reg
== REG_PC
)
4451 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4455 if (unwind
.fp_reg
!= REG_SP
)
4456 as_bad (_("unexpected .unwind_movsp directive"));
4458 /* Generate opcode to restore the value. */
4460 add_unwind_opcode (op
, 1);
4462 /* Record the information for later. */
4463 unwind
.fp_reg
= reg
;
4464 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4465 unwind
.sp_restored
= 1;
4468 /* Parse an unwind_pad directive. */
4471 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4475 if (!unwind
.proc_start
)
4476 as_bad (MISSING_FNSTART
);
4478 if (immediate_for_directive (&offset
) == FAIL
)
4483 as_bad (_("stack increment must be multiple of 4"));
4484 ignore_rest_of_line ();
4488 /* Don't generate any opcodes, just record the details for later. */
4489 unwind
.frame_size
+= offset
;
4490 unwind
.pending_offset
+= offset
;
4492 demand_empty_rest_of_line ();
4495 /* Parse an unwind_setfp directive. */
4498 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4504 if (!unwind
.proc_start
)
4505 as_bad (MISSING_FNSTART
);
4507 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4508 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4511 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4513 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4515 as_bad (_("expected <reg>, <reg>"));
4516 ignore_rest_of_line ();
4520 /* Optional constant. */
4521 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4523 if (immediate_for_directive (&offset
) == FAIL
)
4529 demand_empty_rest_of_line ();
4531 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4533 as_bad (_("register must be either sp or set by a previous"
4534 "unwind_movsp directive"));
4538 /* Don't generate any opcodes, just record the information for later. */
4539 unwind
.fp_reg
= fp_reg
;
4541 if (sp_reg
== REG_SP
)
4542 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4544 unwind
.fp_offset
-= offset
;
4547 /* Parse an unwind_raw directive. */
4550 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4553 /* This is an arbitrary limit. */
4554 unsigned char op
[16];
4557 if (!unwind
.proc_start
)
4558 as_bad (MISSING_FNSTART
);
4561 if (exp
.X_op
== O_constant
4562 && skip_past_comma (&input_line_pointer
) != FAIL
)
4564 unwind
.frame_size
+= exp
.X_add_number
;
4568 exp
.X_op
= O_illegal
;
4570 if (exp
.X_op
!= O_constant
)
4572 as_bad (_("expected <offset>, <opcode>"));
4573 ignore_rest_of_line ();
4579 /* Parse the opcode. */
4584 as_bad (_("unwind opcode too long"));
4585 ignore_rest_of_line ();
4587 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4589 as_bad (_("invalid unwind opcode"));
4590 ignore_rest_of_line ();
4593 op
[count
++] = exp
.X_add_number
;
4595 /* Parse the next byte. */
4596 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4602 /* Add the opcode bytes in reverse order. */
4604 add_unwind_opcode (op
[count
], 1);
4606 demand_empty_rest_of_line ();
4610 /* Parse a .eabi_attribute directive. */
4613 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4615 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4617 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4618 attributes_set_explicitly
[tag
] = 1;
4621 /* Emit a tls fix for the symbol. */
4624 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4628 #ifdef md_flush_pending_output
4629 md_flush_pending_output ();
4632 #ifdef md_cons_align
4636 /* Since we're just labelling the code, there's no need to define a
4639 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4640 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4641 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4642 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4644 #endif /* OBJ_ELF */
4646 static void s_arm_arch (int);
4647 static void s_arm_object_arch (int);
4648 static void s_arm_cpu (int);
4649 static void s_arm_fpu (int);
4650 static void s_arm_arch_extension (int);
4655 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4662 if (exp
.X_op
== O_symbol
)
4663 exp
.X_op
= O_secrel
;
4665 emit_expr (&exp
, 4);
4667 while (*input_line_pointer
++ == ',');
4669 input_line_pointer
--;
4670 demand_empty_rest_of_line ();
4674 /* This table describes all the machine specific pseudo-ops the assembler
4675 has to support. The fields are:
4676 pseudo-op name without dot
4677 function to call to execute this pseudo-op
4678 Integer arg to pass to the function. */
4680 const pseudo_typeS md_pseudo_table
[] =
4682 /* Never called because '.req' does not start a line. */
4683 { "req", s_req
, 0 },
4684 /* Following two are likewise never called. */
4687 { "unreq", s_unreq
, 0 },
4688 { "bss", s_bss
, 0 },
4689 { "align", s_align_ptwo
, 2 },
4690 { "arm", s_arm
, 0 },
4691 { "thumb", s_thumb
, 0 },
4692 { "code", s_code
, 0 },
4693 { "force_thumb", s_force_thumb
, 0 },
4694 { "thumb_func", s_thumb_func
, 0 },
4695 { "thumb_set", s_thumb_set
, 0 },
4696 { "even", s_even
, 0 },
4697 { "ltorg", s_ltorg
, 0 },
4698 { "pool", s_ltorg
, 0 },
4699 { "syntax", s_syntax
, 0 },
4700 { "cpu", s_arm_cpu
, 0 },
4701 { "arch", s_arm_arch
, 0 },
4702 { "object_arch", s_arm_object_arch
, 0 },
4703 { "fpu", s_arm_fpu
, 0 },
4704 { "arch_extension", s_arm_arch_extension
, 0 },
4706 { "word", s_arm_elf_cons
, 4 },
4707 { "long", s_arm_elf_cons
, 4 },
4708 { "inst.n", s_arm_elf_inst
, 2 },
4709 { "inst.w", s_arm_elf_inst
, 4 },
4710 { "inst", s_arm_elf_inst
, 0 },
4711 { "rel31", s_arm_rel31
, 0 },
4712 { "fnstart", s_arm_unwind_fnstart
, 0 },
4713 { "fnend", s_arm_unwind_fnend
, 0 },
4714 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4715 { "personality", s_arm_unwind_personality
, 0 },
4716 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4717 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4718 { "save", s_arm_unwind_save
, 0 },
4719 { "vsave", s_arm_unwind_save
, 1 },
4720 { "movsp", s_arm_unwind_movsp
, 0 },
4721 { "pad", s_arm_unwind_pad
, 0 },
4722 { "setfp", s_arm_unwind_setfp
, 0 },
4723 { "unwind_raw", s_arm_unwind_raw
, 0 },
4724 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4725 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4729 /* These are used for dwarf. */
4733 /* These are used for dwarf2. */
4734 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4735 { "loc", dwarf2_directive_loc
, 0 },
4736 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4738 { "extend", float_cons
, 'x' },
4739 { "ldouble", float_cons
, 'x' },
4740 { "packed", float_cons
, 'p' },
4742 {"secrel32", pe_directive_secrel
, 0},
4745 /* These are for compatibility with CodeComposer Studio. */
4746 {"ref", s_ccs_ref
, 0},
4747 {"def", s_ccs_def
, 0},
4748 {"asmfunc", s_ccs_asmfunc
, 0},
4749 {"endasmfunc", s_ccs_endasmfunc
, 0},
4754 /* Parser functions used exclusively in instruction operands. */
4756 /* Generic immediate-value read function for use in insn parsing.
4757 STR points to the beginning of the immediate (the leading #);
4758 VAL receives the value; if the value is outside [MIN, MAX]
4759 issue an error. PREFIX_OPT is true if the immediate prefix is
4763 parse_immediate (char **str
, int *val
, int min
, int max
,
4764 bfd_boolean prefix_opt
)
4767 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4768 if (exp
.X_op
!= O_constant
)
4770 inst
.error
= _("constant expression required");
4774 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4776 inst
.error
= _("immediate value out of range");
4780 *val
= exp
.X_add_number
;
4784 /* Less-generic immediate-value read function with the possibility of loading a
4785 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4786 instructions. Puts the result directly in inst.operands[i]. */
4789 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4790 bfd_boolean allow_symbol_p
)
4793 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4796 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4798 if (exp_p
->X_op
== O_constant
)
4800 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4801 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4802 O_constant. We have to be careful not to break compilation for
4803 32-bit X_add_number, though. */
4804 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4806 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4807 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4809 inst
.operands
[i
].regisimm
= 1;
4812 else if (exp_p
->X_op
== O_big
4813 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4815 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4817 /* Bignums have their least significant bits in
4818 generic_bignum[0]. Make sure we put 32 bits in imm and
4819 32 bits in reg, in a (hopefully) portable way. */
4820 gas_assert (parts
!= 0);
4822 /* Make sure that the number is not too big.
4823 PR 11972: Bignums can now be sign-extended to the
4824 size of a .octa so check that the out of range bits
4825 are all zero or all one. */
4826 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4828 LITTLENUM_TYPE m
= -1;
4830 if (generic_bignum
[parts
* 2] != 0
4831 && generic_bignum
[parts
* 2] != m
)
4834 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4835 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4839 inst
.operands
[i
].imm
= 0;
4840 for (j
= 0; j
< parts
; j
++, idx
++)
4841 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4842 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4843 inst
.operands
[i
].reg
= 0;
4844 for (j
= 0; j
< parts
; j
++, idx
++)
4845 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4846 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4847 inst
.operands
[i
].regisimm
= 1;
4849 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4857 /* Returns the pseudo-register number of an FPA immediate constant,
4858 or FAIL if there isn't a valid constant here. */
4861 parse_fpa_immediate (char ** str
)
4863 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4869 /* First try and match exact strings, this is to guarantee
4870 that some formats will work even for cross assembly. */
4872 for (i
= 0; fp_const
[i
]; i
++)
4874 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4878 *str
+= strlen (fp_const
[i
]);
4879 if (is_end_of_line
[(unsigned char) **str
])
4885 /* Just because we didn't get a match doesn't mean that the constant
4886 isn't valid, just that it is in a format that we don't
4887 automatically recognize. Try parsing it with the standard
4888 expression routines. */
4890 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4892 /* Look for a raw floating point number. */
4893 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4894 && is_end_of_line
[(unsigned char) *save_in
])
4896 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4898 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4900 if (words
[j
] != fp_values
[i
][j
])
4904 if (j
== MAX_LITTLENUMS
)
4912 /* Try and parse a more complex expression, this will probably fail
4913 unless the code uses a floating point prefix (eg "0f"). */
4914 save_in
= input_line_pointer
;
4915 input_line_pointer
= *str
;
4916 if (expression (&exp
) == absolute_section
4917 && exp
.X_op
== O_big
4918 && exp
.X_add_number
< 0)
4920 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4922 #define X_PRECISION 5
4923 #define E_PRECISION 15L
4924 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
4926 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4928 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4930 if (words
[j
] != fp_values
[i
][j
])
4934 if (j
== MAX_LITTLENUMS
)
4936 *str
= input_line_pointer
;
4937 input_line_pointer
= save_in
;
4944 *str
= input_line_pointer
;
4945 input_line_pointer
= save_in
;
4946 inst
.error
= _("invalid FPA immediate expression");
4950 /* Returns 1 if a number has "quarter-precision" float format
4951 0baBbbbbbc defgh000 00000000 00000000. */
4954 is_quarter_float (unsigned imm
)
4956 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
4957 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
4961 /* Detect the presence of a floating point or integer zero constant,
4965 parse_ifimm_zero (char **in
)
4969 if (!is_immediate_prefix (**in
))
4974 /* Accept #0x0 as a synonym for #0. */
4975 if (strncmp (*in
, "0x", 2) == 0)
4978 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
4983 error_code
= atof_generic (in
, ".", EXP_CHARS
,
4984 &generic_floating_point_number
);
4987 && generic_floating_point_number
.sign
== '+'
4988 && (generic_floating_point_number
.low
4989 > generic_floating_point_number
.leader
))
4995 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4996 0baBbbbbbc defgh000 00000000 00000000.
4997 The zero and minus-zero cases need special handling, since they can't be
4998 encoded in the "quarter-precision" float format, but can nonetheless be
4999 loaded as integer constants. */
5002 parse_qfloat_immediate (char **ccp
, int *immed
)
5006 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5007 int found_fpchar
= 0;
5009 skip_past_char (&str
, '#');
5011 /* We must not accidentally parse an integer as a floating-point number. Make
5012 sure that the value we parse is not an integer by checking for special
5013 characters '.' or 'e'.
5014 FIXME: This is a horrible hack, but doing better is tricky because type
5015 information isn't in a very usable state at parse time. */
5017 skip_whitespace (fpnum
);
5019 if (strncmp (fpnum
, "0x", 2) == 0)
5023 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5024 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5034 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5036 unsigned fpword
= 0;
5039 /* Our FP word must be 32 bits (single-precision FP). */
5040 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5042 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5046 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5059 /* Shift operands. */
5062 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5065 struct asm_shift_name
5068 enum shift_kind kind
;
5071 /* Third argument to parse_shift. */
5072 enum parse_shift_mode
5074 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5075 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5076 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5077 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5078 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5081 /* Parse a <shift> specifier on an ARM data processing instruction.
5082 This has three forms:
5084 (LSL|LSR|ASL|ASR|ROR) Rs
5085 (LSL|LSR|ASL|ASR|ROR) #imm
5088 Note that ASL is assimilated to LSL in the instruction encoding, and
5089 RRX to ROR #0 (which cannot be written as such). */
5092 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5094 const struct asm_shift_name
*shift_name
;
5095 enum shift_kind shift
;
5100 for (p
= *str
; ISALPHA (*p
); p
++)
5105 inst
.error
= _("shift expression expected");
5109 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5112 if (shift_name
== NULL
)
5114 inst
.error
= _("shift expression expected");
5118 shift
= shift_name
->kind
;
5122 case NO_SHIFT_RESTRICT
:
5123 case SHIFT_IMMEDIATE
: break;
5125 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5126 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5128 inst
.error
= _("'LSL' or 'ASR' required");
5133 case SHIFT_LSL_IMMEDIATE
:
5134 if (shift
!= SHIFT_LSL
)
5136 inst
.error
= _("'LSL' required");
5141 case SHIFT_ASR_IMMEDIATE
:
5142 if (shift
!= SHIFT_ASR
)
5144 inst
.error
= _("'ASR' required");
5152 if (shift
!= SHIFT_RRX
)
5154 /* Whitespace can appear here if the next thing is a bare digit. */
5155 skip_whitespace (p
);
5157 if (mode
== NO_SHIFT_RESTRICT
5158 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5160 inst
.operands
[i
].imm
= reg
;
5161 inst
.operands
[i
].immisreg
= 1;
5163 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5166 inst
.operands
[i
].shift_kind
= shift
;
5167 inst
.operands
[i
].shifted
= 1;
5172 /* Parse a <shifter_operand> for an ARM data processing instruction:
5175 #<immediate>, <rotate>
5179 where <shift> is defined by parse_shift above, and <rotate> is a
5180 multiple of 2 between 0 and 30. Validation of immediate operands
5181 is deferred to md_apply_fix. */
5184 parse_shifter_operand (char **str
, int i
)
5189 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5191 inst
.operands
[i
].reg
= value
;
5192 inst
.operands
[i
].isreg
= 1;
5194 /* parse_shift will override this if appropriate */
5195 inst
.reloc
.exp
.X_op
= O_constant
;
5196 inst
.reloc
.exp
.X_add_number
= 0;
5198 if (skip_past_comma (str
) == FAIL
)
5201 /* Shift operation on register. */
5202 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5205 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
5208 if (skip_past_comma (str
) == SUCCESS
)
5210 /* #x, y -- ie explicit rotation by Y. */
5211 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5214 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
5216 inst
.error
= _("constant expression expected");
5220 value
= exp
.X_add_number
;
5221 if (value
< 0 || value
> 30 || value
% 2 != 0)
5223 inst
.error
= _("invalid rotation");
5226 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
5228 inst
.error
= _("invalid constant");
5232 /* Encode as specified. */
5233 inst
.operands
[i
].imm
= inst
.reloc
.exp
.X_add_number
| value
<< 7;
5237 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5238 inst
.reloc
.pc_rel
= 0;
5242 /* Group relocation information. Each entry in the table contains the
5243 textual name of the relocation as may appear in assembler source
5244 and must end with a colon.
5245 Along with this textual name are the relocation codes to be used if
5246 the corresponding instruction is an ALU instruction (ADD or SUB only),
5247 an LDR, an LDRS, or an LDC. */
5249 struct group_reloc_table_entry
5260 /* Varieties of non-ALU group relocation. */
5267 static struct group_reloc_table_entry group_reloc_table
[] =
5268 { /* Program counter relative: */
5270 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5275 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5276 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5277 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5278 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5280 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5285 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5286 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5287 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5288 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5290 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5291 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5292 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5293 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5294 /* Section base relative */
5296 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5301 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5302 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5303 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5304 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5306 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5311 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5312 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5313 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5314 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5316 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5317 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5318 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5319 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5320 /* Absolute thumb alu relocations. */
5322 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5327 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5332 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5337 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5342 /* Given the address of a pointer pointing to the textual name of a group
5343 relocation as may appear in assembler source, attempt to find its details
5344 in group_reloc_table. The pointer will be updated to the character after
5345 the trailing colon. On failure, FAIL will be returned; SUCCESS
5346 otherwise. On success, *entry will be updated to point at the relevant
5347 group_reloc_table entry. */
5350 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5353 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5355 int length
= strlen (group_reloc_table
[i
].name
);
5357 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5358 && (*str
)[length
] == ':')
5360 *out
= &group_reloc_table
[i
];
5361 *str
+= (length
+ 1);
5369 /* Parse a <shifter_operand> for an ARM data processing instruction
5370 (as for parse_shifter_operand) where group relocations are allowed:
5373 #<immediate>, <rotate>
5374 #:<group_reloc>:<expression>
5378 where <group_reloc> is one of the strings defined in group_reloc_table.
5379 The hashes are optional.
5381 Everything else is as for parse_shifter_operand. */
5383 static parse_operand_result
5384 parse_shifter_operand_group_reloc (char **str
, int i
)
5386 /* Determine if we have the sequence of characters #: or just :
5387 coming next. If we do, then we check for a group relocation.
5388 If we don't, punt the whole lot to parse_shifter_operand. */
5390 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5391 || (*str
)[0] == ':')
5393 struct group_reloc_table_entry
*entry
;
5395 if ((*str
)[0] == '#')
5400 /* Try to parse a group relocation. Anything else is an error. */
5401 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5403 inst
.error
= _("unknown group relocation");
5404 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5407 /* We now have the group relocation table entry corresponding to
5408 the name in the assembler source. Next, we parse the expression. */
5409 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
5410 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5412 /* Record the relocation type (always the ALU variant here). */
5413 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5414 gas_assert (inst
.reloc
.type
!= 0);
5416 return PARSE_OPERAND_SUCCESS
;
5419 return parse_shifter_operand (str
, i
) == SUCCESS
5420 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5422 /* Never reached. */
5425 /* Parse a Neon alignment expression. Information is written to
5426 inst.operands[i]. We assume the initial ':' has been skipped.
5428 align .imm = align << 8, .immisalign=1, .preind=0 */
5429 static parse_operand_result
5430 parse_neon_alignment (char **str
, int i
)
5435 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5437 if (exp
.X_op
!= O_constant
)
5439 inst
.error
= _("alignment must be constant");
5440 return PARSE_OPERAND_FAIL
;
5443 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5444 inst
.operands
[i
].immisalign
= 1;
5445 /* Alignments are not pre-indexes. */
5446 inst
.operands
[i
].preind
= 0;
5449 return PARSE_OPERAND_SUCCESS
;
5452 /* Parse all forms of an ARM address expression. Information is written
5453 to inst.operands[i] and/or inst.reloc.
5455 Preindexed addressing (.preind=1):
5457 [Rn, #offset] .reg=Rn .reloc.exp=offset
5458 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5459 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5460 .shift_kind=shift .reloc.exp=shift_imm
5462 These three may have a trailing ! which causes .writeback to be set also.
5464 Postindexed addressing (.postind=1, .writeback=1):
5466 [Rn], #offset .reg=Rn .reloc.exp=offset
5467 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5468 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5469 .shift_kind=shift .reloc.exp=shift_imm
5471 Unindexed addressing (.preind=0, .postind=0):
5473 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5477 [Rn]{!} shorthand for [Rn,#0]{!}
5478 =immediate .isreg=0 .reloc.exp=immediate
5479 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5481 It is the caller's responsibility to check for addressing modes not
5482 supported by the instruction, and to set inst.reloc.type. */
5484 static parse_operand_result
5485 parse_address_main (char **str
, int i
, int group_relocations
,
5486 group_reloc_type group_type
)
5491 if (skip_past_char (&p
, '[') == FAIL
)
5493 if (skip_past_char (&p
, '=') == FAIL
)
5495 /* Bare address - translate to PC-relative offset. */
5496 inst
.reloc
.pc_rel
= 1;
5497 inst
.operands
[i
].reg
= REG_PC
;
5498 inst
.operands
[i
].isreg
= 1;
5499 inst
.operands
[i
].preind
= 1;
5501 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_OPT_PREFIX_BIG
))
5502 return PARSE_OPERAND_FAIL
;
5504 else if (parse_big_immediate (&p
, i
, &inst
.reloc
.exp
,
5505 /*allow_symbol_p=*/TRUE
))
5506 return PARSE_OPERAND_FAIL
;
5509 return PARSE_OPERAND_SUCCESS
;
5512 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5513 skip_whitespace (p
);
5515 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5517 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5518 return PARSE_OPERAND_FAIL
;
5520 inst
.operands
[i
].reg
= reg
;
5521 inst
.operands
[i
].isreg
= 1;
5523 if (skip_past_comma (&p
) == SUCCESS
)
5525 inst
.operands
[i
].preind
= 1;
5528 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5530 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5532 inst
.operands
[i
].imm
= reg
;
5533 inst
.operands
[i
].immisreg
= 1;
5535 if (skip_past_comma (&p
) == SUCCESS
)
5536 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5537 return PARSE_OPERAND_FAIL
;
5539 else if (skip_past_char (&p
, ':') == SUCCESS
)
5541 /* FIXME: '@' should be used here, but it's filtered out by generic
5542 code before we get to see it here. This may be subject to
5544 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5546 if (result
!= PARSE_OPERAND_SUCCESS
)
5551 if (inst
.operands
[i
].negative
)
5553 inst
.operands
[i
].negative
= 0;
5557 if (group_relocations
5558 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5560 struct group_reloc_table_entry
*entry
;
5562 /* Skip over the #: or : sequence. */
5568 /* Try to parse a group relocation. Anything else is an
5570 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5572 inst
.error
= _("unknown group relocation");
5573 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5576 /* We now have the group relocation table entry corresponding to
5577 the name in the assembler source. Next, we parse the
5579 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5580 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5582 /* Record the relocation type. */
5586 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5590 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5594 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5601 if (inst
.reloc
.type
== 0)
5603 inst
.error
= _("this group relocation is not allowed on this instruction");
5604 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5610 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5611 return PARSE_OPERAND_FAIL
;
5612 /* If the offset is 0, find out if it's a +0 or -0. */
5613 if (inst
.reloc
.exp
.X_op
== O_constant
5614 && inst
.reloc
.exp
.X_add_number
== 0)
5616 skip_whitespace (q
);
5620 skip_whitespace (q
);
5623 inst
.operands
[i
].negative
= 1;
5628 else if (skip_past_char (&p
, ':') == SUCCESS
)
5630 /* FIXME: '@' should be used here, but it's filtered out by generic code
5631 before we get to see it here. This may be subject to change. */
5632 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5634 if (result
!= PARSE_OPERAND_SUCCESS
)
5638 if (skip_past_char (&p
, ']') == FAIL
)
5640 inst
.error
= _("']' expected");
5641 return PARSE_OPERAND_FAIL
;
5644 if (skip_past_char (&p
, '!') == SUCCESS
)
5645 inst
.operands
[i
].writeback
= 1;
5647 else if (skip_past_comma (&p
) == SUCCESS
)
5649 if (skip_past_char (&p
, '{') == SUCCESS
)
5651 /* [Rn], {expr} - unindexed, with option */
5652 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5653 0, 255, TRUE
) == FAIL
)
5654 return PARSE_OPERAND_FAIL
;
5656 if (skip_past_char (&p
, '}') == FAIL
)
5658 inst
.error
= _("'}' expected at end of 'option' field");
5659 return PARSE_OPERAND_FAIL
;
5661 if (inst
.operands
[i
].preind
)
5663 inst
.error
= _("cannot combine index with option");
5664 return PARSE_OPERAND_FAIL
;
5667 return PARSE_OPERAND_SUCCESS
;
5671 inst
.operands
[i
].postind
= 1;
5672 inst
.operands
[i
].writeback
= 1;
5674 if (inst
.operands
[i
].preind
)
5676 inst
.error
= _("cannot combine pre- and post-indexing");
5677 return PARSE_OPERAND_FAIL
;
5681 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5683 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5685 /* We might be using the immediate for alignment already. If we
5686 are, OR the register number into the low-order bits. */
5687 if (inst
.operands
[i
].immisalign
)
5688 inst
.operands
[i
].imm
|= reg
;
5690 inst
.operands
[i
].imm
= reg
;
5691 inst
.operands
[i
].immisreg
= 1;
5693 if (skip_past_comma (&p
) == SUCCESS
)
5694 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5695 return PARSE_OPERAND_FAIL
;
5700 if (inst
.operands
[i
].negative
)
5702 inst
.operands
[i
].negative
= 0;
5705 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5706 return PARSE_OPERAND_FAIL
;
5707 /* If the offset is 0, find out if it's a +0 or -0. */
5708 if (inst
.reloc
.exp
.X_op
== O_constant
5709 && inst
.reloc
.exp
.X_add_number
== 0)
5711 skip_whitespace (q
);
5715 skip_whitespace (q
);
5718 inst
.operands
[i
].negative
= 1;
5724 /* If at this point neither .preind nor .postind is set, we have a
5725 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5726 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5728 inst
.operands
[i
].preind
= 1;
5729 inst
.reloc
.exp
.X_op
= O_constant
;
5730 inst
.reloc
.exp
.X_add_number
= 0;
5733 return PARSE_OPERAND_SUCCESS
;
5737 parse_address (char **str
, int i
)
5739 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
/* Parse a memory-address operand for operand slot I, additionally
   accepting a group relocation prefix of kind TYPE (group relocations
   enabled via the third argument to parse_address_main).  Returns the
   full parse_operand_result so callers can distinguish the
   no-backtrack failure case.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
5749 /* Parse an operand for a MOVW or MOVT instruction. */
5751 parse_half (char **str
)
5756 skip_past_char (&p
, '#');
5757 if (strncasecmp (p
, ":lower16:", 9) == 0)
5758 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5759 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5760 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5762 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5765 skip_whitespace (p
);
5768 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5771 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5773 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5775 inst
.error
= _("constant expression expected");
5778 if (inst
.reloc
.exp
.X_add_number
< 0
5779 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5781 inst
.error
= _("immediate value out of range");
5789 /* Miscellaneous. */
5791 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5792 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5794 parse_psr (char **str
, bfd_boolean lhs
)
5797 unsigned long psr_field
;
5798 const struct asm_psr
*psr
;
5800 bfd_boolean is_apsr
= FALSE
;
5801 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5803 /* PR gas/12698: If the user has specified -march=all then m_profile will
5804 be TRUE, but we want to ignore it in this case as we are building for any
5805 CPU type, including non-m variants. */
5806 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5809 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5810 feature for ease of use and backwards compatibility. */
5812 if (strncasecmp (p
, "SPSR", 4) == 0)
5815 goto unsupported_psr
;
5817 psr_field
= SPSR_BIT
;
5819 else if (strncasecmp (p
, "CPSR", 4) == 0)
5822 goto unsupported_psr
;
5826 else if (strncasecmp (p
, "APSR", 4) == 0)
5828 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5829 and ARMv7-R architecture CPUs. */
5838 while (ISALNUM (*p
) || *p
== '_');
5840 if (strncasecmp (start
, "iapsr", 5) == 0
5841 || strncasecmp (start
, "eapsr", 5) == 0
5842 || strncasecmp (start
, "xpsr", 4) == 0
5843 || strncasecmp (start
, "psr", 3) == 0)
5844 p
= start
+ strcspn (start
, "rR") + 1;
5846 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5852 /* If APSR is being written, a bitfield may be specified. Note that
5853 APSR itself is handled above. */
5854 if (psr
->field
<= 3)
5856 psr_field
= psr
->field
;
5862 /* M-profile MSR instructions have the mask field set to "10", except
5863 *PSR variants which modify APSR, which may use a different mask (and
5864 have been handled already). Do that by setting the PSR_f field
5866 return psr
->field
| (lhs
? PSR_f
: 0);
5869 goto unsupported_psr
;
5875 /* A suffix follows. */
5881 while (ISALNUM (*p
) || *p
== '_');
5885 /* APSR uses a notation for bits, rather than fields. */
5886 unsigned int nzcvq_bits
= 0;
5887 unsigned int g_bit
= 0;
5890 for (bit
= start
; bit
!= p
; bit
++)
5892 switch (TOLOWER (*bit
))
5895 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5899 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5903 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5907 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
5911 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
5915 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
5919 inst
.error
= _("unexpected bit specified after APSR");
5924 if (nzcvq_bits
== 0x1f)
5929 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
5931 inst
.error
= _("selected processor does not "
5932 "support DSP extension");
5939 if ((nzcvq_bits
& 0x20) != 0
5940 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
5941 || (g_bit
& 0x2) != 0)
5943 inst
.error
= _("bad bitmask specified after APSR");
5949 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5954 psr_field
|= psr
->field
;
5960 goto error
; /* Garbage after "[CS]PSR". */
5962 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5963 is deprecated, but allow it anyway. */
5967 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5970 else if (!m_profile
)
5971 /* These bits are never right for M-profile devices: don't set them
5972 (only code paths which read/write APSR reach here). */
5973 psr_field
|= (PSR_c
| PSR_f
);
5979 inst
.error
= _("selected processor does not support requested special "
5980 "purpose register");
5984 inst
.error
= _("flag for {c}psr instruction expected");
5988 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5989 value suitable for splatting into the AIF field of the instruction. */
5992 parse_cps_flags (char **str
)
6001 case '\0': case ',':
6004 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6005 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6006 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6009 inst
.error
= _("unrecognized CPS flag");
6014 if (saw_a_flag
== 0)
6016 inst
.error
= _("missing CPS flags");
6024 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6025 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6028 parse_endian_specifier (char **str
)
6033 if (strncasecmp (s
, "BE", 2))
6035 else if (strncasecmp (s
, "LE", 2))
6039 inst
.error
= _("valid endian specifiers are be or le");
6043 if (ISALNUM (s
[2]) || s
[2] == '_')
6045 inst
.error
= _("valid endian specifiers are be or le");
6050 return little_endian
;
6053 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6054 value suitable for poking into the rotate field of an sxt or sxta
6055 instruction, or FAIL on error. */
6058 parse_ror (char **str
)
6063 if (strncasecmp (s
, "ROR", 3) == 0)
6067 inst
.error
= _("missing rotation field after comma");
6071 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6076 case 0: *str
= s
; return 0x0;
6077 case 8: *str
= s
; return 0x1;
6078 case 16: *str
= s
; return 0x2;
6079 case 24: *str
= s
; return 0x3;
6082 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6087 /* Parse a conditional code (from conds[] below). The value returned is in the
6088 range 0 .. 14, or FAIL. */
6090 parse_cond (char **str
)
6093 const struct asm_cond
*c
;
6095 /* Condition codes are always 2 characters, so matching up to
6096 3 characters is sufficient. */
6101 while (ISALPHA (*q
) && n
< 3)
6103 cond
[n
] = TOLOWER (*q
);
6108 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6111 inst
.error
= _("condition required");
6119 /* Record a use of the given feature. */
6121 record_feature_use (const arm_feature_set
*feature
)
6124 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6126 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6129 /* If the given feature available in the selected CPU, mark it as used.
6130 Returns TRUE iff feature is available. */
6132 mark_feature_used (const arm_feature_set
*feature
)
6134 /* Ensure the option is valid on the current architecture. */
6135 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6138 /* Add the appropriate architecture feature for the barrier option used.
6140 record_feature_use (feature
);
6145 /* Parse an option for a barrier instruction. Returns the encoding for the
6148 parse_barrier (char **str
)
6151 const struct asm_barrier_opt
*o
;
6154 while (ISALPHA (*q
))
6157 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6162 if (!mark_feature_used (&o
->arch
))
6169 /* Parse the operands of a table branch instruction. Similar to a memory
6172 parse_tb (char **str
)
6177 if (skip_past_char (&p
, '[') == FAIL
)
6179 inst
.error
= _("'[' expected");
6183 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6185 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6188 inst
.operands
[0].reg
= reg
;
6190 if (skip_past_comma (&p
) == FAIL
)
6192 inst
.error
= _("',' expected");
6196 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6198 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6201 inst
.operands
[0].imm
= reg
;
6203 if (skip_past_comma (&p
) == SUCCESS
)
6205 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6207 if (inst
.reloc
.exp
.X_add_number
!= 1)
6209 inst
.error
= _("invalid shift");
6212 inst
.operands
[0].shifted
= 1;
6215 if (skip_past_char (&p
, ']') == FAIL
)
6217 inst
.error
= _("']' expected");
6224 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6225 information on the types the operands can take and how they are encoded.
6226 Up to four operands may be read; this function handles setting the
6227 ".present" field for each read operand itself.
6228 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6229 else returns FAIL. */
6232 parse_neon_mov (char **str
, int *which_operand
)
6234 int i
= *which_operand
, val
;
6235 enum arm_reg_type rtype
;
6237 struct neon_type_el optype
;
6239 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6241 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6242 inst
.operands
[i
].reg
= val
;
6243 inst
.operands
[i
].isscalar
= 1;
6244 inst
.operands
[i
].vectype
= optype
;
6245 inst
.operands
[i
++].present
= 1;
6247 if (skip_past_comma (&ptr
) == FAIL
)
6250 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6253 inst
.operands
[i
].reg
= val
;
6254 inst
.operands
[i
].isreg
= 1;
6255 inst
.operands
[i
].present
= 1;
6257 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6260 /* Cases 0, 1, 2, 3, 5 (D only). */
6261 if (skip_past_comma (&ptr
) == FAIL
)
6264 inst
.operands
[i
].reg
= val
;
6265 inst
.operands
[i
].isreg
= 1;
6266 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6267 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6268 inst
.operands
[i
].isvec
= 1;
6269 inst
.operands
[i
].vectype
= optype
;
6270 inst
.operands
[i
++].present
= 1;
6272 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6274 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6275 Case 13: VMOV <Sd>, <Rm> */
6276 inst
.operands
[i
].reg
= val
;
6277 inst
.operands
[i
].isreg
= 1;
6278 inst
.operands
[i
].present
= 1;
6280 if (rtype
== REG_TYPE_NQ
)
6282 first_error (_("can't use Neon quad register here"));
6285 else if (rtype
!= REG_TYPE_VFS
)
6288 if (skip_past_comma (&ptr
) == FAIL
)
6290 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6292 inst
.operands
[i
].reg
= val
;
6293 inst
.operands
[i
].isreg
= 1;
6294 inst
.operands
[i
].present
= 1;
6297 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6300 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6301 Case 1: VMOV<c><q> <Dd>, <Dm>
6302 Case 8: VMOV.F32 <Sd>, <Sm>
6303 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6305 inst
.operands
[i
].reg
= val
;
6306 inst
.operands
[i
].isreg
= 1;
6307 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6308 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6309 inst
.operands
[i
].isvec
= 1;
6310 inst
.operands
[i
].vectype
= optype
;
6311 inst
.operands
[i
].present
= 1;
6313 if (skip_past_comma (&ptr
) == SUCCESS
)
6318 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6321 inst
.operands
[i
].reg
= val
;
6322 inst
.operands
[i
].isreg
= 1;
6323 inst
.operands
[i
++].present
= 1;
6325 if (skip_past_comma (&ptr
) == FAIL
)
6328 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6331 inst
.operands
[i
].reg
= val
;
6332 inst
.operands
[i
].isreg
= 1;
6333 inst
.operands
[i
].present
= 1;
6336 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6337 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6338 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6339 Case 10: VMOV.F32 <Sd>, #<imm>
6340 Case 11: VMOV.F64 <Dd>, #<imm> */
6341 inst
.operands
[i
].immisfloat
= 1;
6342 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6344 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6345 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6349 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6353 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6356 inst
.operands
[i
].reg
= val
;
6357 inst
.operands
[i
].isreg
= 1;
6358 inst
.operands
[i
++].present
= 1;
6360 if (skip_past_comma (&ptr
) == FAIL
)
6363 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6365 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6366 inst
.operands
[i
].reg
= val
;
6367 inst
.operands
[i
].isscalar
= 1;
6368 inst
.operands
[i
].present
= 1;
6369 inst
.operands
[i
].vectype
= optype
;
6371 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6373 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6374 inst
.operands
[i
].reg
= val
;
6375 inst
.operands
[i
].isreg
= 1;
6376 inst
.operands
[i
++].present
= 1;
6378 if (skip_past_comma (&ptr
) == FAIL
)
6381 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6384 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6388 inst
.operands
[i
].reg
= val
;
6389 inst
.operands
[i
].isreg
= 1;
6390 inst
.operands
[i
].isvec
= 1;
6391 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6392 inst
.operands
[i
].vectype
= optype
;
6393 inst
.operands
[i
].present
= 1;
6395 if (rtype
== REG_TYPE_VFS
)
6399 if (skip_past_comma (&ptr
) == FAIL
)
6401 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6404 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6407 inst
.operands
[i
].reg
= val
;
6408 inst
.operands
[i
].isreg
= 1;
6409 inst
.operands
[i
].isvec
= 1;
6410 inst
.operands
[i
].issingle
= 1;
6411 inst
.operands
[i
].vectype
= optype
;
6412 inst
.operands
[i
].present
= 1;
6415 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6419 inst
.operands
[i
].reg
= val
;
6420 inst
.operands
[i
].isreg
= 1;
6421 inst
.operands
[i
].isvec
= 1;
6422 inst
.operands
[i
].issingle
= 1;
6423 inst
.operands
[i
].vectype
= optype
;
6424 inst
.operands
[i
].present
= 1;
6429 first_error (_("parse error"));
6433 /* Successfully parsed the operands. Update args. */
6439 first_error (_("expected comma"));
6443 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Use this macro when the operand constraints are different for ARM
   and THUMB (e.g. ldrd).  The Thumb matcher code is packed into the
   high 16 bits; parse_operands unpacks the appropriate half depending
   on the instruction set being assembled.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6452 /* Matcher codes for parse_operands. */
6453 enum operand_parse_code
6455 OP_stop
, /* end of line */
6457 OP_RR
, /* ARM register */
6458 OP_RRnpc
, /* ARM register, not r15 */
6459 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6460 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6461 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6462 optional trailing ! */
6463 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6464 OP_RCP
, /* Coprocessor number */
6465 OP_RCN
, /* Coprocessor register */
6466 OP_RF
, /* FPA register */
6467 OP_RVS
, /* VFP single precision register */
6468 OP_RVD
, /* VFP double precision register (0..15) */
6469 OP_RND
, /* Neon double precision register (0..31) */
6470 OP_RNQ
, /* Neon quad precision register */
6471 OP_RVSD
, /* VFP single or double precision register */
6472 OP_RNDQ
, /* Neon double or quad precision register */
6473 OP_RNSDQ
, /* Neon single, double or quad precision register */
6474 OP_RNSC
, /* Neon scalar D[X] */
6475 OP_RVC
, /* VFP control register */
6476 OP_RMF
, /* Maverick F register */
6477 OP_RMD
, /* Maverick D register */
6478 OP_RMFX
, /* Maverick FX register */
6479 OP_RMDX
, /* Maverick DX register */
6480 OP_RMAX
, /* Maverick AX register */
6481 OP_RMDS
, /* Maverick DSPSC register */
6482 OP_RIWR
, /* iWMMXt wR register */
6483 OP_RIWC
, /* iWMMXt wC register */
6484 OP_RIWG
, /* iWMMXt wCG register */
6485 OP_RXA
, /* XScale accumulator register */
6487 OP_REGLST
, /* ARM register list */
6488 OP_VRSLST
, /* VFP single-precision register list */
6489 OP_VRDLST
, /* VFP double-precision register list */
6490 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6491 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6492 OP_NSTRLST
, /* Neon element/structure list */
6494 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6495 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6496 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6497 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6498 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6499 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6500 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6501 OP_VMOV
, /* Neon VMOV operands. */
6502 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6503 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6504 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6506 OP_I0
, /* immediate zero */
6507 OP_I7
, /* immediate value 0 .. 7 */
6508 OP_I15
, /* 0 .. 15 */
6509 OP_I16
, /* 1 .. 16 */
6510 OP_I16z
, /* 0 .. 16 */
6511 OP_I31
, /* 0 .. 31 */
6512 OP_I31w
, /* 0 .. 31, optional trailing ! */
6513 OP_I32
, /* 1 .. 32 */
6514 OP_I32z
, /* 0 .. 32 */
6515 OP_I63
, /* 0 .. 63 */
6516 OP_I63s
, /* -64 .. 63 */
6517 OP_I64
, /* 1 .. 64 */
6518 OP_I64z
, /* 0 .. 64 */
6519 OP_I255
, /* 0 .. 255 */
6521 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6522 OP_I7b
, /* 0 .. 7 */
6523 OP_I15b
, /* 0 .. 15 */
6524 OP_I31b
, /* 0 .. 31 */
6526 OP_SH
, /* shifter operand */
6527 OP_SHG
, /* shifter operand with possible group relocation */
6528 OP_ADDR
, /* Memory address expression (any mode) */
6529 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6530 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6531 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6532 OP_EXP
, /* arbitrary expression */
6533 OP_EXPi
, /* same, with optional immediate prefix */
6534 OP_EXPr
, /* same, with optional relocation suffix */
6535 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6537 OP_CPSF
, /* CPS flags */
6538 OP_ENDI
, /* Endianness specifier */
6539 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6540 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6541 OP_COND
, /* conditional code */
6542 OP_TB
, /* Table branch. */
6544 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6546 OP_RRnpc_I0
, /* ARM register or literal 0 */
6547 OP_RR_EXr
, /* ARM register or expression with opt. reloc suff. */
6548 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6549 OP_RF_IF
, /* FPA register or immediate */
6550 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6551 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6553 /* Optional operands. */
6554 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6555 OP_oI31b
, /* 0 .. 31 */
6556 OP_oI32b
, /* 1 .. 32 */
6557 OP_oI32z
, /* 0 .. 32 */
6558 OP_oIffffb
, /* 0 .. 65535 */
6559 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6561 OP_oRR
, /* ARM register */
6562 OP_oRRnpc
, /* ARM register, not the PC */
6563 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6564 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6565 OP_oRND
, /* Optional Neon double precision register */
6566 OP_oRNQ
, /* Optional Neon quad precision register */
6567 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6568 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6569 OP_oSHll
, /* LSL immediate */
6570 OP_oSHar
, /* ASR immediate */
6571 OP_oSHllar
, /* LSL or ASR immediate */
6572 OP_oROR
, /* ROR 0/8/16/24 */
6573 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6575 /* Some pre-defined mixed (ARM/THUMB) operands. */
6576 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6577 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6578 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6580 OP_FIRST_OPTIONAL
= OP_oI7b
6583 /* Generic instruction operand parser. This does no encoding and no
6584 semantic validation; it merely squirrels values away in the inst
6585 structure. Returns SUCCESS or FAIL depending on whether the
6586 specified grammar matched. */
6588 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6590 unsigned const int *upat
= pattern
;
6591 char *backtrack_pos
= 0;
6592 const char *backtrack_error
= 0;
6593 int i
, val
= 0, backtrack_index
= 0;
6594 enum arm_reg_type rtype
;
6595 parse_operand_result result
;
6596 unsigned int op_parse_code
;
6598 #define po_char_or_fail(chr) \
6601 if (skip_past_char (&str, chr) == FAIL) \
6606 #define po_reg_or_fail(regtype) \
6609 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6610 & inst.operands[i].vectype); \
6613 first_error (_(reg_expected_msgs[regtype])); \
6616 inst.operands[i].reg = val; \
6617 inst.operands[i].isreg = 1; \
6618 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6619 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6620 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6621 || rtype == REG_TYPE_VFD \
6622 || rtype == REG_TYPE_NQ); \
6626 #define po_reg_or_goto(regtype, label) \
6629 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6630 & inst.operands[i].vectype); \
6634 inst.operands[i].reg = val; \
6635 inst.operands[i].isreg = 1; \
6636 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6637 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6638 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6639 || rtype == REG_TYPE_VFD \
6640 || rtype == REG_TYPE_NQ); \
6644 #define po_imm_or_fail(min, max, popt) \
6647 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6649 inst.operands[i].imm = val; \
6653 #define po_scalar_or_goto(elsz, label) \
6656 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6659 inst.operands[i].reg = val; \
6660 inst.operands[i].isscalar = 1; \
6664 #define po_misc_or_fail(expr) \
6672 #define po_misc_or_fail_no_backtrack(expr) \
6676 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6677 backtrack_pos = 0; \
6678 if (result != PARSE_OPERAND_SUCCESS) \
6683 #define po_barrier_or_imm(str) \
6686 val = parse_barrier (&str); \
6687 if (val == FAIL && ! ISALPHA (*str)) \
6690 /* ISB can only take SY as an option. */ \
6691 || ((inst.instruction & 0xf0) == 0x60 \
6694 inst.error = _("invalid barrier type"); \
6695 backtrack_pos = 0; \
6701 skip_whitespace (str
);
6703 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6705 op_parse_code
= upat
[i
];
6706 if (op_parse_code
>= 1<<16)
6707 op_parse_code
= thumb
? (op_parse_code
>> 16)
6708 : (op_parse_code
& ((1<<16)-1));
6710 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6712 /* Remember where we are in case we need to backtrack. */
6713 gas_assert (!backtrack_pos
);
6714 backtrack_pos
= str
;
6715 backtrack_error
= inst
.error
;
6716 backtrack_index
= i
;
6719 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6720 po_char_or_fail (',');
6722 switch (op_parse_code
)
6730 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6731 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6732 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6733 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6734 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6735 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6737 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6739 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6741 /* Also accept generic coprocessor regs for unknown registers. */
6743 po_reg_or_fail (REG_TYPE_CN
);
6745 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6746 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6747 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6748 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6749 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6750 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6751 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6752 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6753 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6754 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6756 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6758 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6759 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6761 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6763 /* Neon scalar. Using an element size of 8 means that some invalid
6764 scalars are accepted here, so deal with those in later code. */
6765 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6769 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6772 po_imm_or_fail (0, 0, TRUE
);
6777 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6782 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6785 if (parse_ifimm_zero (&str
))
6786 inst
.operands
[i
].imm
= 0;
6790 = _("only floating point zero is allowed as immediate value");
6798 po_scalar_or_goto (8, try_rr
);
6801 po_reg_or_fail (REG_TYPE_RN
);
6807 po_scalar_or_goto (8, try_nsdq
);
6810 po_reg_or_fail (REG_TYPE_NSDQ
);
6816 po_scalar_or_goto (8, try_ndq
);
6819 po_reg_or_fail (REG_TYPE_NDQ
);
6825 po_scalar_or_goto (8, try_vfd
);
6828 po_reg_or_fail (REG_TYPE_VFD
);
6833 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6834 not careful then bad things might happen. */
6835 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6840 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6843 /* There's a possibility of getting a 64-bit immediate here, so
6844 we need special handling. */
6845 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6848 inst
.error
= _("immediate value is out of range");
6856 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6859 po_imm_or_fail (0, 63, TRUE
);
6864 po_char_or_fail ('[');
6865 po_reg_or_fail (REG_TYPE_RN
);
6866 po_char_or_fail (']');
6872 po_reg_or_fail (REG_TYPE_RN
);
6873 if (skip_past_char (&str
, '!') == SUCCESS
)
6874 inst
.operands
[i
].writeback
= 1;
6878 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6879 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6880 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6881 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6882 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6883 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6884 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6885 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6886 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6887 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6888 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6889 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6891 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6893 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6894 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6896 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6897 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6898 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
6899 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6901 /* Immediate variants */
6903 po_char_or_fail ('{');
6904 po_imm_or_fail (0, 255, TRUE
);
6905 po_char_or_fail ('}');
6909 /* The expression parser chokes on a trailing !, so we have
6910 to find it first and zap it. */
6913 while (*s
&& *s
!= ',')
6918 inst
.operands
[i
].writeback
= 1;
6920 po_imm_or_fail (0, 31, TRUE
);
6928 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6933 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6938 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6940 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6942 val
= parse_reloc (&str
);
6945 inst
.error
= _("unrecognized relocation suffix");
6948 else if (val
!= BFD_RELOC_UNUSED
)
6950 inst
.operands
[i
].imm
= val
;
6951 inst
.operands
[i
].hasreloc
= 1;
6956 /* Operand for MOVW or MOVT. */
6958 po_misc_or_fail (parse_half (&str
));
6961 /* Register or expression. */
6962 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6963 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6965 /* Register or immediate. */
6966 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6967 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6969 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6971 if (!is_immediate_prefix (*str
))
6974 val
= parse_fpa_immediate (&str
);
6977 /* FPA immediates are encoded as registers 8-15.
6978 parse_fpa_immediate has already applied the offset. */
6979 inst
.operands
[i
].reg
= val
;
6980 inst
.operands
[i
].isreg
= 1;
6983 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6984 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6986 /* Two kinds of register. */
6989 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6991 || (rege
->type
!= REG_TYPE_MMXWR
6992 && rege
->type
!= REG_TYPE_MMXWC
6993 && rege
->type
!= REG_TYPE_MMXWCG
))
6995 inst
.error
= _("iWMMXt data or control register expected");
6998 inst
.operands
[i
].reg
= rege
->number
;
6999 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7005 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7007 || (rege
->type
!= REG_TYPE_MMXWC
7008 && rege
->type
!= REG_TYPE_MMXWCG
))
7010 inst
.error
= _("iWMMXt control register expected");
7013 inst
.operands
[i
].reg
= rege
->number
;
7014 inst
.operands
[i
].isreg
= 1;
7019 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7020 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7021 case OP_oROR
: val
= parse_ror (&str
); break;
7022 case OP_COND
: val
= parse_cond (&str
); break;
7023 case OP_oBARRIER_I15
:
7024 po_barrier_or_imm (str
); break;
7026 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7032 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7033 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7035 inst
.error
= _("Banked registers are not available with this "
7041 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7045 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7048 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7050 if (strncasecmp (str
, "APSR_", 5) == 0)
7057 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7058 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7059 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7060 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7061 default: found
= 16;
7065 inst
.operands
[i
].isvec
= 1;
7066 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7067 inst
.operands
[i
].reg
= REG_PC
;
7074 po_misc_or_fail (parse_tb (&str
));
7077 /* Register lists. */
7079 val
= parse_reg_list (&str
);
7082 inst
.operands
[i
].writeback
= 1;
7088 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7092 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7096 /* Allow Q registers too. */
7097 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7102 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7104 inst
.operands
[i
].issingle
= 1;
7109 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7114 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7115 &inst
.operands
[i
].vectype
);
7118 /* Addressing modes */
7120 po_misc_or_fail (parse_address (&str
, i
));
7124 po_misc_or_fail_no_backtrack (
7125 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7129 po_misc_or_fail_no_backtrack (
7130 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7134 po_misc_or_fail_no_backtrack (
7135 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7139 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7143 po_misc_or_fail_no_backtrack (
7144 parse_shifter_operand_group_reloc (&str
, i
));
7148 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7152 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7156 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7160 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7163 /* Various value-based sanity checks and shared operations. We
7164 do not signal immediate failures for the register constraints;
7165 this allows a syntax error to take precedence. */
7166 switch (op_parse_code
)
7174 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7175 inst
.error
= BAD_PC
;
7180 if (inst
.operands
[i
].isreg
)
7182 if (inst
.operands
[i
].reg
== REG_PC
)
7183 inst
.error
= BAD_PC
;
7184 else if (inst
.operands
[i
].reg
== REG_SP
)
7185 inst
.error
= BAD_SP
;
7190 if (inst
.operands
[i
].isreg
7191 && inst
.operands
[i
].reg
== REG_PC
7192 && (inst
.operands
[i
].writeback
|| thumb
))
7193 inst
.error
= BAD_PC
;
7202 case OP_oBARRIER_I15
:
7211 inst
.operands
[i
].imm
= val
;
7218 /* If we get here, this operand was successfully parsed. */
7219 inst
.operands
[i
].present
= 1;
7223 inst
.error
= BAD_ARGS
;
7228 /* The parse routine should already have set inst.error, but set a
7229 default here just in case. */
7231 inst
.error
= _("syntax error");
7235 /* Do not backtrack over a trailing optional argument that
7236 absorbed some text. We will only fail again, with the
7237 'garbage following instruction' error message, which is
7238 probably less helpful than the current one. */
7239 if (backtrack_index
== i
&& backtrack_pos
!= str
7240 && upat
[i
+1] == OP_stop
)
7243 inst
.error
= _("syntax error");
7247 /* Try again, skipping the optional argument at backtrack_pos. */
7248 str
= backtrack_pos
;
7249 inst
.error
= backtrack_error
;
7250 inst
.operands
[backtrack_index
].present
= 0;
7251 i
= backtrack_index
;
7255 /* Check that we have parsed all the arguments. */
7256 if (*str
!= '\0' && !inst
.error
)
7257 inst
.error
= _("garbage following instruction");
7259 return inst
.error
? FAIL
: SUCCESS
;
7262 #undef po_char_or_fail
7263 #undef po_reg_or_fail
7264 #undef po_reg_or_goto
7265 #undef po_imm_or_fail
7266 #undef po_scalar_or_fail
7267 #undef po_barrier_or_imm
7269 /* Shorthand macro for instruction encoding functions issuing errors. */
7270 #define constraint(expr, err) \
7281 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7282 instructions are unpredictable if these registers are used. This
7283 is the BadReg predicate in ARM's Thumb-2 documentation. */
7284 #define reject_bad_reg(reg) \
7286 if (reg == REG_SP || reg == REG_PC) \
7288 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
7293 /* If REG is R13 (the stack pointer), warn that its use is
7295 #define warn_deprecated_sp(reg) \
7297 if (warn_on_deprecated && reg == REG_SP) \
7298 as_tsktsk (_("use of r13 is deprecated")); \
7301 /* Functions for operand encoding. ARM, then Thumb. */
7303 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7305 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7307 The only binary encoding difference is the Coprocessor number. Coprocessor
7308 9 is used for half-precision calculations or conversions. The format of the
7309 instruction is the same as the equivalent Coprocessor 10 instruction that
7310 exists for Single-Precision operation. */
7313 do_scalar_fp16_v82_encode (void)
7315 if (inst
.cond
!= COND_ALWAYS
)
7316 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7317 " the behaviour is UNPREDICTABLE"));
7318 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7321 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7322 mark_feature_used (&arm_ext_fp16
);
7325 /* If VAL can be encoded in the immediate field of an ARM instruction,
7326 return the encoded form. Otherwise, return FAIL. */
7329 encode_arm_immediate (unsigned int val
)
7336 for (i
= 2; i
< 32; i
+= 2)
7337 if ((a
= rotate_left (val
, i
)) <= 0xff)
7338 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7343 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7344 return the encoded form. Otherwise, return FAIL. */
7346 encode_thumb32_immediate (unsigned int val
)
7353 for (i
= 1; i
<= 24; i
++)
7356 if ((val
& ~(0xff << i
)) == 0)
7357 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7361 if (val
== ((a
<< 16) | a
))
7363 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7367 if (val
== ((a
<< 16) | a
))
7368 return 0x200 | (a
>> 8);
7372 /* Encode a VFP SP or DP register number into inst.instruction. */
7375 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7377 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7380 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7383 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7386 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7391 first_error (_("D register out of range for selected VFP version"));
7399 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7403 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7407 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7411 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7415 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7419 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7427 /* Encode a <shift> in an ARM-format instruction. The immediate,
7428 if any, is handled by md_apply_fix. */
7430 encode_arm_shift (int i
)
7432 /* register-shifted register. */
7433 if (inst
.operands
[i
].immisreg
)
7436 for (index
= 0; index
<= i
; ++index
)
7438 /* Check the operand only when it's presented. In pre-UAL syntax,
7439 if the destination register is the same as the first operand, two
7440 register form of the instruction can be used. */
7441 if (inst
.operands
[index
].present
&& inst
.operands
[index
].isreg
7442 && inst
.operands
[index
].reg
== REG_PC
)
7443 as_warn (UNPRED_REG ("r15"));
7446 if (inst
.operands
[i
].imm
== REG_PC
)
7447 as_warn (UNPRED_REG ("r15"));
7450 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7451 inst
.instruction
|= SHIFT_ROR
<< 5;
7454 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7455 if (inst
.operands
[i
].immisreg
)
7457 inst
.instruction
|= SHIFT_BY_REG
;
7458 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7461 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7466 encode_arm_shifter_operand (int i
)
7468 if (inst
.operands
[i
].isreg
)
7470 inst
.instruction
|= inst
.operands
[i
].reg
;
7471 encode_arm_shift (i
);
7475 inst
.instruction
|= INST_IMMEDIATE
;
7476 if (inst
.reloc
.type
!= BFD_RELOC_ARM_IMMEDIATE
)
7477 inst
.instruction
|= inst
.operands
[i
].imm
;
7481 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7483 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7486 Generate an error if the operand is not a register. */
7487 constraint (!inst
.operands
[i
].isreg
,
7488 _("Instruction does not support =N addresses"));
7490 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7492 if (inst
.operands
[i
].preind
)
7496 inst
.error
= _("instruction does not accept preindexed addressing");
7499 inst
.instruction
|= PRE_INDEX
;
7500 if (inst
.operands
[i
].writeback
)
7501 inst
.instruction
|= WRITE_BACK
;
7504 else if (inst
.operands
[i
].postind
)
7506 gas_assert (inst
.operands
[i
].writeback
);
7508 inst
.instruction
|= WRITE_BACK
;
7510 else /* unindexed - only for coprocessor */
7512 inst
.error
= _("instruction does not accept unindexed addressing");
7516 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7517 && (((inst
.instruction
& 0x000f0000) >> 16)
7518 == ((inst
.instruction
& 0x0000f000) >> 12)))
7519 as_warn ((inst
.instruction
& LOAD_BIT
)
7520 ? _("destination register same as write-back base")
7521 : _("source register same as write-back base"));
7524 /* inst.operands[i] was set up by parse_address. Encode it into an
7525 ARM-format mode 2 load or store instruction. If is_t is true,
7526 reject forms that cannot be used with a T instruction (i.e. not
7529 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7531 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7533 encode_arm_addr_mode_common (i
, is_t
);
7535 if (inst
.operands
[i
].immisreg
)
7537 constraint ((inst
.operands
[i
].imm
== REG_PC
7538 || (is_pc
&& inst
.operands
[i
].writeback
)),
7540 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7541 inst
.instruction
|= inst
.operands
[i
].imm
;
7542 if (!inst
.operands
[i
].negative
)
7543 inst
.instruction
|= INDEX_UP
;
7544 if (inst
.operands
[i
].shifted
)
7546 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7547 inst
.instruction
|= SHIFT_ROR
<< 5;
7550 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7551 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7555 else /* immediate offset in inst.reloc */
7557 if (is_pc
&& !inst
.reloc
.pc_rel
)
7559 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7561 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7562 cannot use PC in addressing.
7563 PC cannot be used in writeback addressing, either. */
7564 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7567 /* Use of PC in str is deprecated for ARMv7. */
7568 if (warn_on_deprecated
7570 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7571 as_tsktsk (_("use of PC in this instruction is deprecated"));
7574 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7576 /* Prefer + for zero encoded value. */
7577 if (!inst
.operands
[i
].negative
)
7578 inst
.instruction
|= INDEX_UP
;
7579 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
7584 /* inst.operands[i] was set up by parse_address. Encode it into an
7585 ARM-format mode 3 load or store instruction. Reject forms that
7586 cannot be used with such instructions. If is_t is true, reject
7587 forms that cannot be used with a T instruction (i.e. not
7590 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7592 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7594 inst
.error
= _("instruction does not accept scaled register index");
7598 encode_arm_addr_mode_common (i
, is_t
);
7600 if (inst
.operands
[i
].immisreg
)
7602 constraint ((inst
.operands
[i
].imm
== REG_PC
7603 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7605 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7607 inst
.instruction
|= inst
.operands
[i
].imm
;
7608 if (!inst
.operands
[i
].negative
)
7609 inst
.instruction
|= INDEX_UP
;
7611 else /* immediate offset in inst.reloc */
7613 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
7614 && inst
.operands
[i
].writeback
),
7616 inst
.instruction
|= HWOFFSET_IMM
;
7617 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7619 /* Prefer + for zero encoded value. */
7620 if (!inst
.operands
[i
].negative
)
7621 inst
.instruction
|= INDEX_UP
;
7623 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7628 /* Write immediate bits [7:0] to the following locations:
7630 |28/24|23 19|18 16|15 4|3 0|
7631 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7633 This function is used by VMOV/VMVN/VORR/VBIC. */
7636 neon_write_immbits (unsigned immbits
)
7638 inst
.instruction
|= immbits
& 0xf;
7639 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7640 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is neither read nor written.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      break;
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D: i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
/* For immediate of above form, return 0bABCD: gather bit 0 of each
   byte of IMM into a 4-bit result, lowest byte giving bit 0.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
/* Compress quarter-float representation to 0b...000 abcdefgh:
   bits [25:19] of the single-precision pattern supply the low seven
   bits, and the sign bit (bit 31) supplies bit 7.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned exp_and_mantissa = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return exp_and_mantissa | sign;
}
7709 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7710 the instruction. *OP is passed as the initial value of the op field, and
7711 may be set to a different value depending on the constant (i.e.
7712 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7713 MVN). If the immediate looks like a repeated pattern then also
7714 try smaller element sizes. */
7717 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7718 unsigned *immbits
, int *op
, int size
,
7719 enum neon_el_type type
)
7721 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7723 if (type
== NT_float
&& !float_p
)
7726 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7728 if (size
!= 32 || *op
== 1)
7730 *immbits
= neon_qfloat_bits (immlo
);
7736 if (neon_bits_same_in_bytes (immhi
)
7737 && neon_bits_same_in_bytes (immlo
))
7741 *immbits
= (neon_squash_bits (immhi
) << 4)
7742 | neon_squash_bits (immlo
);
7753 if (immlo
== (immlo
& 0x000000ff))
7758 else if (immlo
== (immlo
& 0x0000ff00))
7760 *immbits
= immlo
>> 8;
7763 else if (immlo
== (immlo
& 0x00ff0000))
7765 *immbits
= immlo
>> 16;
7768 else if (immlo
== (immlo
& 0xff000000))
7770 *immbits
= immlo
>> 24;
7773 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7775 *immbits
= (immlo
>> 8) & 0xff;
7778 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7780 *immbits
= (immlo
>> 16) & 0xff;
7784 if ((immlo
& 0xffff) != (immlo
>> 16))
7791 if (immlo
== (immlo
& 0x000000ff))
7796 else if (immlo
== (immlo
& 0x0000ff00))
7798 *immbits
= immlo
>> 8;
7802 if ((immlo
& 0xff) != (immlo
>> 8))
7807 if (immlo
== (immlo
& 0x000000ff))
7809 /* Don't allow MVN with 8-bit immediate. */
7819 #if defined BFD_HOST_64_BIT
7820 /* Returns TRUE if double precision value V may be cast
7821 to single precision without loss of accuracy. */
7824 is_double_a_single (bfd_int64_t v
)
7826 int exp
= (int)((v
>> 52) & 0x7FF);
7827 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7829 return (exp
== 0 || exp
== 0x7FF
7830 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
7831 && (mantissa
& 0x1FFFFFFFl
) == 0;
7834 /* Returns a double precision value casted to single precision
7835 (ignoring the least significant bits in exponent and mantissa). */
7838 double_to_single (bfd_int64_t v
)
7840 int sign
= (int) ((v
>> 63) & 1l);
7841 int exp
= (int) ((v
>> 52) & 0x7FF);
7842 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7848 exp
= exp
- 1023 + 127;
7857 /* No denormalized numbers. */
7863 return (sign
<< 31) | (exp
<< 23) | mantissa
;
7865 #endif /* BFD_HOST_64_BIT */
7874 static void do_vfp_nsyn_opcode (const char *);
7876 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7877 Determine whether it can be performed with a move instruction; if
7878 it can, convert inst.instruction to that move instruction and
7879 return TRUE; if it can't, convert inst.instruction to a literal-pool
7880 load and return FALSE. If this is not a valid thing to do in the
7881 current context, set inst.error and return TRUE.
7883 inst.operands[i] describes the destination register. */
7886 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
7889 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
7890 bfd_boolean arm_p
= (t
== CONST_ARM
);
7893 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
7897 if ((inst
.instruction
& tbit
) == 0)
7899 inst
.error
= _("invalid pseudo operation");
7903 if (inst
.reloc
.exp
.X_op
!= O_constant
7904 && inst
.reloc
.exp
.X_op
!= O_symbol
7905 && inst
.reloc
.exp
.X_op
!= O_big
)
7907 inst
.error
= _("constant expression expected");
7911 if (inst
.reloc
.exp
.X_op
== O_constant
7912 || inst
.reloc
.exp
.X_op
== O_big
)
7914 #if defined BFD_HOST_64_BIT
7919 if (inst
.reloc
.exp
.X_op
== O_big
)
7921 LITTLENUM_TYPE w
[X_PRECISION
];
7924 if (inst
.reloc
.exp
.X_add_number
== -1)
7926 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
7928 /* FIXME: Should we check words w[2..5] ? */
7933 #if defined BFD_HOST_64_BIT
7935 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
7936 << LITTLENUM_NUMBER_OF_BITS
)
7937 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
7938 << LITTLENUM_NUMBER_OF_BITS
)
7939 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
7940 << LITTLENUM_NUMBER_OF_BITS
)
7941 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
7943 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
7944 | (l
[0] & LITTLENUM_MASK
);
7948 v
= inst
.reloc
.exp
.X_add_number
;
7950 if (!inst
.operands
[i
].issingle
)
7954 /* This can be encoded only for a low register. */
7955 if ((v
& ~0xFF) == 0 && (inst
.operands
[i
].reg
< 8))
7957 /* This can be done with a mov(1) instruction. */
7958 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
7959 inst
.instruction
|= v
;
7963 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
7964 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7966 /* Check if on thumb2 it can be done with a mov.w, mvn or
7967 movw instruction. */
7968 unsigned int newimm
;
7969 bfd_boolean isNegated
;
7971 newimm
= encode_thumb32_immediate (v
);
7972 if (newimm
!= (unsigned int) FAIL
)
7976 newimm
= encode_thumb32_immediate (~v
);
7977 if (newimm
!= (unsigned int) FAIL
)
7981 /* The number can be loaded with a mov.w or mvn
7983 if (newimm
!= (unsigned int) FAIL
7984 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
7986 inst
.instruction
= (0xf04f0000 /* MOV.W. */
7987 | (inst
.operands
[i
].reg
<< 8));
7988 /* Change to MOVN. */
7989 inst
.instruction
|= (isNegated
? 0x200000 : 0);
7990 inst
.instruction
|= (newimm
& 0x800) << 15;
7991 inst
.instruction
|= (newimm
& 0x700) << 4;
7992 inst
.instruction
|= (newimm
& 0x0ff);
7995 /* The number can be loaded with a movw instruction. */
7996 else if ((v
& ~0xFFFF) == 0
7997 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7999 int imm
= v
& 0xFFFF;
8001 inst
.instruction
= 0xf2400000; /* MOVW. */
8002 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8003 inst
.instruction
|= (imm
& 0xf000) << 4;
8004 inst
.instruction
|= (imm
& 0x0800) << 15;
8005 inst
.instruction
|= (imm
& 0x0700) << 4;
8006 inst
.instruction
|= (imm
& 0x00ff);
8013 int value
= encode_arm_immediate (v
);
8017 /* This can be done with a mov instruction. */
8018 inst
.instruction
&= LITERAL_MASK
;
8019 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8020 inst
.instruction
|= value
& 0xfff;
8024 value
= encode_arm_immediate (~ v
);
8027 /* This can be done with a mvn instruction. */
8028 inst
.instruction
&= LITERAL_MASK
;
8029 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8030 inst
.instruction
|= value
& 0xfff;
8034 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8037 unsigned immbits
= 0;
8038 unsigned immlo
= inst
.operands
[1].imm
;
8039 unsigned immhi
= inst
.operands
[1].regisimm
8040 ? inst
.operands
[1].reg
8041 : inst
.reloc
.exp
.X_unsigned
8043 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8044 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8045 &op
, 64, NT_invtype
);
8049 neon_invert_size (&immlo
, &immhi
, 64);
8051 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8052 &op
, 64, NT_invtype
);
8057 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8063 /* Fill other bits in vmov encoding for both thumb and arm. */
8065 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8067 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8068 neon_write_immbits (immbits
);
8076 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8077 if (inst
.operands
[i
].issingle
8078 && is_quarter_float (inst
.operands
[1].imm
)
8079 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8081 inst
.operands
[1].imm
=
8082 neon_qfloat_bits (v
);
8083 do_vfp_nsyn_opcode ("fconsts");
8087 /* If our host does not support a 64-bit type then we cannot perform
8088 the following optimization. This mean that there will be a
8089 discrepancy between the output produced by an assembler built for
8090 a 32-bit-only host and the output produced from a 64-bit host, but
8091 this cannot be helped. */
8092 #if defined BFD_HOST_64_BIT
8093 else if (!inst
.operands
[1].issingle
8094 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8096 if (is_double_a_single (v
)
8097 && is_quarter_float (double_to_single (v
)))
8099 inst
.operands
[1].imm
=
8100 neon_qfloat_bits (double_to_single (v
));
8101 do_vfp_nsyn_opcode ("fconstd");
8109 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8110 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8113 inst
.operands
[1].reg
= REG_PC
;
8114 inst
.operands
[1].isreg
= 1;
8115 inst
.operands
[1].preind
= 1;
8116 inst
.reloc
.pc_rel
= 1;
8117 inst
.reloc
.type
= (thumb_p
8118 ? BFD_RELOC_ARM_THUMB_OFFSET
8120 ? BFD_RELOC_ARM_HWLITERAL
8121 : BFD_RELOC_ARM_LITERAL
));
8125 /* inst.operands[i] was set up by parse_address. Encode it into an
8126 ARM-format instruction. Reject all forms which cannot be encoded
8127 into a coprocessor load/store instruction. If wb_ok is false,
8128 reject use of writeback; if unind_ok is false, reject use of
8129 unindexed addressing. If reloc_override is not 0, use it instead
8130 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8131 (in which case it is preserved). */
8134 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8136 if (!inst
.operands
[i
].isreg
)
8139 if (! inst
.operands
[0].isvec
)
8141 inst
.error
= _("invalid co-processor operand");
8144 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8148 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8150 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8152 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8154 gas_assert (!inst
.operands
[i
].writeback
);
8157 inst
.error
= _("instruction does not support unindexed addressing");
8160 inst
.instruction
|= inst
.operands
[i
].imm
;
8161 inst
.instruction
|= INDEX_UP
;
8165 if (inst
.operands
[i
].preind
)
8166 inst
.instruction
|= PRE_INDEX
;
8168 if (inst
.operands
[i
].writeback
)
8170 if (inst
.operands
[i
].reg
== REG_PC
)
8172 inst
.error
= _("pc may not be used with write-back");
8177 inst
.error
= _("instruction does not support writeback");
8180 inst
.instruction
|= WRITE_BACK
;
8184 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
8185 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8186 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
8187 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8190 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8192 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8195 /* Prefer + for zero encoded value. */
8196 if (!inst
.operands
[i
].negative
)
8197 inst
.instruction
|= INDEX_UP
;
8202 /* Functions for instruction encoding, sorted by sub-architecture.
8203 First some generics; their names are taken from the conventional
8204 bit positions for register arguments in ARM format instructions. */
8214 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8220 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8226 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8227 inst
.instruction
|= inst
.operands
[1].reg
;
8233 inst
.instruction
|= inst
.operands
[0].reg
;
8234 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8240 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8241 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8247 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8248 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8254 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8255 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8259 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8261 if (ARM_CPU_IS_ANY (cpu_variant
))
8263 as_tsktsk ("%s", msg
);
8266 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8278 unsigned Rn
= inst
.operands
[2].reg
;
8279 /* Enforce restrictions on SWP instruction. */
8280 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8282 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8283 _("Rn must not overlap other operands"));
8285 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8287 if (!check_obsolete (&arm_ext_v8
,
8288 _("swp{b} use is obsoleted for ARMv8 and later"))
8289 && warn_on_deprecated
8290 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8291 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8294 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8295 inst
.instruction
|= inst
.operands
[1].reg
;
8296 inst
.instruction
|= Rn
<< 16;
8302 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8303 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8304 inst
.instruction
|= inst
.operands
[2].reg
;
8310 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8311 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
8312 && inst
.reloc
.exp
.X_op
!= O_illegal
)
8313 || inst
.reloc
.exp
.X_add_number
!= 0),
8315 inst
.instruction
|= inst
.operands
[0].reg
;
8316 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8317 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8323 inst
.instruction
|= inst
.operands
[0].imm
;
8329 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8330 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8333 /* ARM instructions, in alphabetical order by function name (except
8334 that wrapper functions appear immediately after the function they
8337 /* This is a pseudo-op of the form "adr rd, label" to be converted
8338 into a relative address of the form "add rd, pc, #label-.-8". */
8343 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8345 /* Frag hacking will turn this into a sub instruction if the offset turns
8346 out to be negative. */
8347 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8348 inst
.reloc
.pc_rel
= 1;
8349 inst
.reloc
.exp
.X_add_number
-= 8;
8352 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8353 into a relative address of the form:
8354 add rd, pc, #low(label-.-8)"
8355 add rd, rd, #high(label-.-8)" */
8360 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8362 /* Frag hacking will turn this into a sub instruction if the offset turns
8363 out to be negative. */
8364 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8365 inst
.reloc
.pc_rel
= 1;
8366 inst
.size
= INSN_SIZE
* 2;
8367 inst
.reloc
.exp
.X_add_number
-= 8;
8373 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8374 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8376 if (!inst
.operands
[1].present
)
8377 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8378 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8379 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8380 encode_arm_shifter_operand (2);
8386 if (inst
.operands
[0].present
)
8387 inst
.instruction
|= inst
.operands
[0].imm
;
8389 inst
.instruction
|= 0xf;
8395 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8396 constraint (msb
> 32, _("bit-field extends past end of register"));
8397 /* The instruction encoding stores the LSB and MSB,
8398 not the LSB and width. */
8399 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8400 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8401 inst
.instruction
|= (msb
- 1) << 16;
8409 /* #0 in second position is alternative syntax for bfc, which is
8410 the same instruction but with REG_PC in the Rm field. */
8411 if (!inst
.operands
[1].isreg
)
8412 inst
.operands
[1].reg
= REG_PC
;
8414 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8415 constraint (msb
> 32, _("bit-field extends past end of register"));
8416 /* The instruction encoding stores the LSB and MSB,
8417 not the LSB and width. */
8418 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8419 inst
.instruction
|= inst
.operands
[1].reg
;
8420 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8421 inst
.instruction
|= (msb
- 1) << 16;
8427 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8428 _("bit-field extends past end of register"));
8429 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8430 inst
.instruction
|= inst
.operands
[1].reg
;
8431 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8432 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8435 /* ARM V5 breakpoint instruction (argument parse)
8436 BKPT <16 bit unsigned immediate>
8437 Instruction is not conditional.
8438 The bit pattern given in insns[] has the COND_ALWAYS condition,
8439 and it is an error if the caller tried to override that. */
8444 /* Top 12 of 16 bits to bits 19:8. */
8445 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8447 /* Bottom 4 of 16 bits to bits 3:0. */
8448 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8452 encode_branch (int default_reloc
)
8454 if (inst
.operands
[0].hasreloc
)
8456 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8457 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8458 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8459 inst
.reloc
.type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8460 ? BFD_RELOC_ARM_PLT32
8461 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8464 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
8465 inst
.reloc
.pc_rel
= 1;
8472 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8473 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8476 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8483 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8485 if (inst
.cond
== COND_ALWAYS
)
8486 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8488 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8492 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8495 /* ARM V5 branch-link-exchange instruction (argument parse)
8496 BLX <target_addr> ie BLX(1)
8497 BLX{<condition>} <Rm> ie BLX(2)
8498 Unfortunately, there are two different opcodes for this mnemonic.
8499 So, the insns[].value is not used, and the code here zaps values
8500 into inst.instruction.
8501 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8506 if (inst
.operands
[0].isreg
)
8508 /* Arg is a register; the opcode provided by insns[] is correct.
8509 It is not illegal to do "blx pc", just useless. */
8510 if (inst
.operands
[0].reg
== REG_PC
)
8511 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8513 inst
.instruction
|= inst
.operands
[0].reg
;
8517 /* Arg is an address; this instruction cannot be executed
8518 conditionally, and the opcode must be adjusted.
8519 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8520 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8521 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8522 inst
.instruction
= 0xfa000000;
8523 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8530 bfd_boolean want_reloc
;
8532 if (inst
.operands
[0].reg
== REG_PC
)
8533 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8535 inst
.instruction
|= inst
.operands
[0].reg
;
8536 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8537 it is for ARMv4t or earlier. */
8538 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8539 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
8543 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8548 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
/* ARM v5TEJ.  Jump to Jazelle code.  */
static void
do_bxj (void)
{
  /* BXJ to PC is architecturally pointless; diagnose but do not reject.  */
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  /* The target register occupies the low four bits of the opcode.  */
  inst.instruction |= inst.operands[0].reg;
}
/* Co-processor data operation:
   CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
   CDP2	     <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}  */
static void
do_cdp (void)
{
  /* Pack the six parsed operands into their opcode fields; operand
     order matches the syntax shown above.  */
  inst.instruction |= inst.operands[0].reg << 8;	/* coproc.  */
  inst.instruction |= inst.operands[1].imm << 20;	/* opcode_1.  */
  inst.instruction |= inst.operands[2].reg << 12;	/* CRd.  */
  inst.instruction |= inst.operands[3].reg << 16;	/* CRn.  */
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;	/* opcode_2.  */
}
8580 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8581 encode_arm_shifter_operand (1);
8584 /* Transfer between coprocessor and ARM registers.
8585 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8590 No special properties. */
8592 struct deprecated_coproc_regs_s
8599 arm_feature_set deprecated
;
8600 arm_feature_set obsoleted
;
8601 const char *dep_msg
;
8602 const char *obs_msg
;
8605 #define DEPR_ACCESS_V8 \
8606 N_("This coprocessor register access is deprecated in ARMv8")
8608 /* Table of all deprecated coprocessor registers. */
8609 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8611 {15, 0, 7, 10, 5, /* CP15DMB. */
8612 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8613 DEPR_ACCESS_V8
, NULL
},
8614 {15, 0, 7, 10, 4, /* CP15DSB. */
8615 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8616 DEPR_ACCESS_V8
, NULL
},
8617 {15, 0, 7, 5, 4, /* CP15ISB. */
8618 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8619 DEPR_ACCESS_V8
, NULL
},
8620 {14, 6, 1, 0, 0, /* TEEHBR. */
8621 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8622 DEPR_ACCESS_V8
, NULL
},
8623 {14, 6, 0, 0, 0, /* TEECR. */
8624 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8625 DEPR_ACCESS_V8
, NULL
},
8628 #undef DEPR_ACCESS_V8
8630 static const size_t deprecated_coproc_reg_count
=
8631 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8639 Rd
= inst
.operands
[2].reg
;
8642 if (inst
.instruction
== 0xee000010
8643 || inst
.instruction
== 0xfe000010)
8645 reject_bad_reg (Rd
);
8648 constraint (Rd
== REG_SP
, BAD_SP
);
8653 if (inst
.instruction
== 0xe000010)
8654 constraint (Rd
== REG_PC
, BAD_PC
);
8657 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8659 const struct deprecated_coproc_regs_s
*r
=
8660 deprecated_coproc_regs
+ i
;
8662 if (inst
.operands
[0].reg
== r
->cp
8663 && inst
.operands
[1].imm
== r
->opc1
8664 && inst
.operands
[3].reg
== r
->crn
8665 && inst
.operands
[4].reg
== r
->crm
8666 && inst
.operands
[5].imm
== r
->opc2
)
8668 if (! ARM_CPU_IS_ANY (cpu_variant
)
8669 && warn_on_deprecated
8670 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8671 as_tsktsk ("%s", r
->dep_msg
);
8675 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8676 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8677 inst
.instruction
|= Rd
<< 12;
8678 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8679 inst
.instruction
|= inst
.operands
[4].reg
;
8680 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
/* Transfer between coprocessor register and pair of ARM registers.
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   MCRR2
   MRRC{cond}
   MRRC2

   Two XScale instructions are special cases of these:

     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15.  */

static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  /* NOTE(review): the thumb/ARM split below is reconstructed -- the
     Thumb encoding rejects SP/PC via reject_bad_reg, ARM only PC.  */
  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
      /* If Rd == Rn, error that the operation is
	 unpredictable (example MRRC p3,#1,r1,r1,c4).  */
      constraint (Rd == Rn, BAD_OVERLAP);
    }

  inst.instruction |= inst.operands[0].reg << 8;	/* coproc.  */
  inst.instruction |= inst.operands[1].imm << 4;	/* opcode.  */
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
}
8733 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8734 if (inst
.operands
[1].present
)
8736 inst
.instruction
|= CPSI_MMOD
;
8737 inst
.instruction
|= inst
.operands
[1].imm
;
8744 inst
.instruction
|= inst
.operands
[0].imm
;
/* SDIV/UDIV encoder.  Supports the two-operand form, in which the
   destination register doubles as the first source operand.  PC is
   rejected for all three registers.  */
static void
do_div (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  /* Two-operand form: "div Rd, Rm" means "div Rd, Rd, Rm".  */
  Rn = (inst.operands[1].present
	? inst.operands[1].reg : Rd);
  Rm = inst.operands[2].reg;

  constraint ((Rd == REG_PC), BAD_PC);
  constraint ((Rn == REG_PC), BAD_PC);
  constraint ((Rm == REG_PC), BAD_PC);

  inst.instruction |= Rd << 16;
  inst.instruction |= Rn << 0;
  inst.instruction |= Rm << 8;
}
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  set_it_insn_type (IT_INSN);
  /* Record the IT block mask (low nibble of the parsed opcode, with a
     stop bit) and the condition operand in the assembler's IT state.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = inst.operands[0].imm;
}
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  /* ffs returns 0 when RANGE is empty, making I equal to -1; test for
     that explicitly, since evaluating (1 << -1) below would be
     undefined behaviour.  */
  int i = ffs (range) - 1;
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
/* Encode an LDM/STM-family instruction: base register, register list,
   writeback handling (with UNPREDICTABLE diagnostics), plus the
   single-register A2 encoding for PUSH/POP.  FROM_PUSH_POP_MNEM is
   non-zero when the source used the PUSH or POP mnemonic.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A "^" suffix on the register list selects the user-bank / type 2
     form of the instruction.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition field, then substitute the
	 single-register PUSH/POP opcode with the register in Rt.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8846 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
/* ARMv5TE load-consecutive (argument parse).

   LDRccD R, mode
   STRccD R, mode

   The first transfer register must be even and the (optional) second
   must be its successor; the pair is encoded via the first register
   only.  */
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 is excluded so that the implied second register is not PC.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* Second register defaults to first + 1 when omitted.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */
  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8894 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8895 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8896 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8897 || inst
.operands
[1].negative
8898 /* This can arise if the programmer has written
8900 or if they have mistakenly used a register name as the last
8903 It is very difficult to distinguish between these two cases
8904 because "rX" might actually be a label. ie the register
8905 name has been occluded by a symbol of the same name. So we
8906 just generate a general 'bad addressing mode' type error
8907 message and leave it up to the programmer to discover the
8908 true cause and fix their mistake. */
8909 || (inst
.operands
[1].reg
== REG_PC
),
8912 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8913 || inst
.reloc
.exp
.X_add_number
!= 0,
8914 _("offset must be zero in ARM encoding"));
8916 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
8918 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8919 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8920 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8926 constraint (inst
.operands
[0].reg
% 2 != 0,
8927 _("even register required"));
8928 constraint (inst
.operands
[1].present
8929 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8930 _("can only load two consecutive registers"));
8931 /* If op 1 were present and equal to PC, this function wouldn't
8932 have been called in the first place. */
8933 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8935 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8936 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8939 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8940 which is not a multiple of four is UNPREDICTABLE. */
8942 check_ldr_r15_aligned (void)
8944 constraint (!(inst
.operands
[1].immisreg
)
8945 && (inst
.operands
[0].reg
== REG_PC
8946 && inst
.operands
[1].reg
== REG_PC
8947 && (inst
.reloc
.exp
.X_add_number
& 0x3)),
8948 _("ldr to register 15 must be 4-byte alligned"));
8954 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8955 if (!inst
.operands
[1].isreg
)
8956 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
8958 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
8959 check_ldr_r15_aligned ();
/* LDRT/STRT (user-mode translation) load/store encoder.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed operand is only tolerated when its offset is a
	 literal zero, i.e. the degenerate "[Rn]" form.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      /* Rewrite the operand as post-indexed with writeback.  */
      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8981 /* Halfword and signed-byte load/store operations. */
8986 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
8987 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8988 if (!inst
.operands
[1].isreg
)
8989 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
8991 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
8997 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8999 if (inst
.operands
[1].preind
)
9001 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9002 || inst
.reloc
.exp
.X_add_number
!= 0,
9003 _("this instruction requires a post-indexed address"));
9005 inst
.operands
[1].preind
= 0;
9006 inst
.operands
[1].postind
= 1;
9007 inst
.operands
[1].writeback
= 1;
9009 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9010 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
/* Co-processor register load/store.
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>  */
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* coproc number.  */
  inst.instruction |= inst.operands[1].reg << 12;	/* CRd.  */
  /* Operand 2 is the address; writeback and the long (L) bit are
     handled by the co-processor address encoder.  */
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9026 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9027 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9028 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9029 && !(inst
.instruction
& 0x00400000))
9030 as_tsktsk (_("Rd and Rm should be different in mla"));
9032 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9033 inst
.instruction
|= inst
.operands
[1].reg
;
9034 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9035 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9041 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9042 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9044 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9045 encode_arm_shifter_operand (1);
9048 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9055 top
= (inst
.instruction
& 0x00400000) != 0;
9056 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
9057 _(":lower16: not allowed this instruction"));
9058 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
9059 _(":upper16: not allowed instruction"));
9060 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9061 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
9063 imm
= inst
.reloc
.exp
.X_add_number
;
9064 /* The value is in two pieces: 0:11, 16:19. */
9065 inst
.instruction
|= (imm
& 0x00000fff);
9066 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9071 do_vfp_nsyn_mrs (void)
9073 if (inst
.operands
[0].isvec
)
9075 if (inst
.operands
[1].reg
!= 1)
9076 first_error (_("operand 1 must be FPSCR"));
9077 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9078 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9079 do_vfp_nsyn_opcode ("fmstat");
9081 else if (inst
.operands
[1].isvec
)
9082 do_vfp_nsyn_opcode ("fmrx");
9090 do_vfp_nsyn_msr (void)
9092 if (inst
.operands
[0].isvec
)
9093 do_vfp_nsyn_opcode ("fmxr");
9103 unsigned Rt
= inst
.operands
[0].reg
;
9105 if (thumb_mode
&& Rt
== REG_SP
)
9107 inst
.error
= BAD_SP
;
9111 /* APSR_ sets isvec. All other refs to PC are illegal. */
9112 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9114 inst
.error
= BAD_PC
;
9118 /* If we get through parsing the register name, we just insert the number
9119 generated into the instruction without further validation. */
9120 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9121 inst
.instruction
|= (Rt
<< 12);
9127 unsigned Rt
= inst
.operands
[1].reg
;
9130 reject_bad_reg (Rt
);
9131 else if (Rt
== REG_PC
)
9133 inst
.error
= BAD_PC
;
9137 /* If we get through parsing the register name, we just insert the number
9138 generated into the instruction without further validation. */
9139 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9140 inst
.instruction
|= (Rt
<< 12);
9148 if (do_vfp_nsyn_mrs () == SUCCESS
)
9151 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9152 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9154 if (inst
.operands
[1].isreg
)
9156 br
= inst
.operands
[1].reg
;
9157 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf000))
9158 as_bad (_("bad register for mrs"));
9162 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9163 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9165 _("'APSR', 'CPSR' or 'SPSR' expected"));
9166 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9169 inst
.instruction
|= br
;
9172 /* Two possible forms:
9173 "{C|S}PSR_<field>, Rm",
9174 "{C|S}PSR_f, #expression". */
9179 if (do_vfp_nsyn_msr () == SUCCESS
)
9182 inst
.instruction
|= inst
.operands
[0].imm
;
9183 if (inst
.operands
[1].isreg
)
9184 inst
.instruction
|= inst
.operands
[1].reg
;
9187 inst
.instruction
|= INST_IMMEDIATE
;
9188 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
9189 inst
.reloc
.pc_rel
= 0;
9196 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9198 if (!inst
.operands
[2].present
)
9199 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9200 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9201 inst
.instruction
|= inst
.operands
[1].reg
;
9202 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9204 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9205 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9206 as_tsktsk (_("Rd and Rm should be different in mul"));
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs.  */

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9237 if (inst
.operands
[0].present
9238 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9240 /* Architectural NOP hints are CPSR sets with no bits selected. */
9241 inst
.instruction
&= 0xf0000000;
9242 inst
.instruction
|= 0x0320f000;
9243 if (inst
.operands
[0].present
)
9244 inst
.instruction
|= inst
.operands
[0].imm
;
9248 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9249 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9250 Condition defaults to COND_ALWAYS.
9251 Error if Rd, Rn or Rm are R15. */
9256 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9257 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9258 inst
.instruction
|= inst
.operands
[2].reg
;
9259 if (inst
.operands
[3].present
)
9260 encode_arm_shift (3);
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note the swapped source operands
	 relative to the branch below.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      /* Operand 3 carries the ASR shift specifier.  */
      encode_arm_shift (3);
    }
}
9286 /* ARMv5TE: Preload-Cache
9287 MP Extensions: Preload for write
9291 Syntactically, like LDR with B=1, W=0, L=1. */
9296 constraint (!inst
.operands
[0].isreg
,
9297 _("'[' expected after PLD mnemonic"));
9298 constraint (inst
.operands
[0].postind
,
9299 _("post-indexed expression used in preload instruction"));
9300 constraint (inst
.operands
[0].writeback
,
9301 _("writeback used in preload instruction"));
9302 constraint (!inst
.operands
[0].preind
,
9303 _("unindexed addressing used in preload instruction"));
9304 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9307 /* ARMv7: PLI <addr_mode> */
9311 constraint (!inst
.operands
[0].isreg
,
9312 _("'[' expected after PLI mnemonic"));
9313 constraint (inst
.operands
[0].postind
,
9314 _("post-indexed expression used in preload instruction"));
9315 constraint (inst
.operands
[0].writeback
,
9316 _("writeback used in preload instruction"));
9317 constraint (!inst
.operands
[0].preind
,
9318 _("unindexed addressing used in preload instruction"));
9319 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9320 inst
.instruction
&= ~PRE_INDEX
;
9326 constraint (inst
.operands
[0].writeback
,
9327 _("push/pop do not support {reglist}^"));
9328 inst
.operands
[1] = inst
.operands
[0];
9329 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9330 inst
.operands
[0].isreg
= 1;
9331 inst
.operands
[0].writeback
= 1;
9332 inst
.operands
[0].reg
= REG_SP
;
9333 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9336 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9337 word at the specified address and the following word
9339 Unconditionally executed.
9340 Error if Rn is R15. */
9345 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9346 if (inst
.operands
[0].writeback
)
9347 inst
.instruction
|= WRITE_BACK
;
9350 /* ARM V6 ssat (argument parse). */
9355 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9356 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9357 inst
.instruction
|= inst
.operands
[2].reg
;
9359 if (inst
.operands
[3].present
)
9360 encode_arm_shift (3);
9363 /* ARM V6 usat (argument parse). */
9368 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9369 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9370 inst
.instruction
|= inst
.operands
[2].reg
;
9372 if (inst
.operands
[3].present
)
9373 encode_arm_shift (3);
9376 /* ARM V6 ssat16 (argument parse). */
9381 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9382 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9383 inst
.instruction
|= inst
.operands
[2].reg
;
9389 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9390 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9391 inst
.instruction
|= inst
.operands
[2].reg
;
9394 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9395 preserving the other bits.
9397 setend <endian_specifier>, where <endian_specifier> is either
9403 if (warn_on_deprecated
9404 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9405 as_tsktsk (_("setend use is deprecated for ARMv8"));
9407 if (inst
.operands
[0].imm
)
9408 inst
.instruction
|= 0x200;
9414 unsigned int Rm
= (inst
.operands
[1].present
9415 ? inst
.operands
[1].reg
9416 : inst
.operands
[0].reg
);
9418 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9419 inst
.instruction
|= Rm
;
9420 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9422 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9423 inst
.instruction
|= SHIFT_BY_REG
;
9424 /* PR 12854: Error on extraneous shifts. */
9425 constraint (inst
.operands
[2].shifted
,
9426 _("extraneous shift as part of operand to shift insn"));
9429 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
9435 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
9436 inst
.reloc
.pc_rel
= 0;
9442 inst
.reloc
.type
= BFD_RELOC_ARM_HVC
;
9443 inst
.reloc
.pc_rel
= 0;
9449 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9450 inst
.reloc
.pc_rel
= 0;
9456 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9457 _("selected processor does not support SETPAN instruction"));
9459 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9465 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9466 _("selected processor does not support SETPAN instruction"));
9468 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9471 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9472 SMLAxy{cond} Rd,Rm,Rs,Rn
9473 SMLAWy{cond} Rd,Rm,Rs,Rn
9474 Error if any register is R15. */
9479 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9480 inst
.instruction
|= inst
.operands
[1].reg
;
9481 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9482 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9485 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9486 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9487 Error if any register is R15.
9488 Warning if Rdlo == Rdhi. */
9493 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9494 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9495 inst
.instruction
|= inst
.operands
[2].reg
;
9496 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9498 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9499 as_tsktsk (_("rdhi and rdlo must be different"));
9502 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9503 SMULxy{cond} Rd,Rm,Rs
9504 Error if any register is R15. */
9509 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9510 inst
.instruction
|= inst
.operands
[1].reg
;
9511 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9514 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9515 the same for both ARM and Thumb-2. */
9522 if (inst
.operands
[0].present
)
9524 reg
= inst
.operands
[0].reg
;
9525 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9530 inst
.instruction
|= reg
<< 16;
9531 inst
.instruction
|= inst
.operands
[1].imm
;
9532 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9533 inst
.instruction
|= WRITE_BACK
;
9536 /* ARM V6 strex (argument parse). */
9541 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9542 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9543 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9544 || inst
.operands
[2].negative
9545 /* See comment in do_ldrex(). */
9546 || (inst
.operands
[2].reg
== REG_PC
),
9549 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9550 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9552 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9553 || inst
.reloc
.exp
.X_add_number
!= 0,
9554 _("offset must be zero in ARM encoding"));
9556 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9557 inst
.instruction
|= inst
.operands
[1].reg
;
9558 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9559 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9565 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9566 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9567 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9568 || inst
.operands
[2].negative
,
9571 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9572 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9580 constraint (inst
.operands
[1].reg
% 2 != 0,
9581 _("even register required"));
9582 constraint (inst
.operands
[2].present
9583 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9584 _("can only store two consecutive registers"));
9585 /* If op 2 were present and equal to PC, this function wouldn't
9586 have been called in the first place. */
9587 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9589 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9590 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9591 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9594 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9595 inst
.instruction
|= inst
.operands
[1].reg
;
9596 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9603 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9604 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9612 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9613 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9618 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9619 extends it to 32-bits, and adds the result to a value in another
9620 register. You can specify a rotation by 0, 8, 16, or 24 bits
9621 before extracting the 16-bit value.
9622 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9623 Condition defaults to COND_ALWAYS.
9624 Error if any register uses R15. */
9629 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9630 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9631 inst
.instruction
|= inst
.operands
[2].reg
;
9632 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9637 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9638 Condition defaults to COND_ALWAYS.
9639 Error if any register uses R15. */
9644 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9645 inst
.instruction
|= inst
.operands
[1].reg
;
9646 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9649 /* VFP instructions. In a logical order: SP variant first, monad
9650 before dyad, arithmetic then move then load/store. */
9653 do_vfp_sp_monadic (void)
9655 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9656 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9660 do_vfp_sp_dyadic (void)
9662 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9663 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9664 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9668 do_vfp_sp_compare_z (void)
9670 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9674 do_vfp_dp_sp_cvt (void)
9676 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9677 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9681 do_vfp_sp_dp_cvt (void)
9683 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9684 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9688 do_vfp_reg_from_sp (void)
9690 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9691 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9695 do_vfp_reg2_from_sp2 (void)
9697 constraint (inst
.operands
[2].imm
!= 2,
9698 _("only two consecutive VFP SP registers allowed here"));
9699 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9700 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9701 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9705 do_vfp_sp_from_reg (void)
9707 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9708 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9712 do_vfp_sp2_from_reg2 (void)
9714 constraint (inst
.operands
[0].imm
!= 2,
9715 _("only two consecutive VFP SP registers allowed here"));
9716 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9717 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9718 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9722 do_vfp_sp_ldst (void)
9724 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9725 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9729 do_vfp_dp_ldst (void)
9731 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9732 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9737 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
9739 if (inst
.operands
[0].writeback
)
9740 inst
.instruction
|= WRITE_BACK
;
9742 constraint (ldstm_type
!= VFP_LDSTMIA
,
9743 _("this addressing mode requires base-register writeback"));
9744 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9745 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
9746 inst
.instruction
|= inst
.operands
[1].imm
;
9750 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
9754 if (inst
.operands
[0].writeback
)
9755 inst
.instruction
|= WRITE_BACK
;
9757 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
9758 _("this addressing mode requires base-register writeback"));
9760 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9761 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9763 count
= inst
.operands
[1].imm
<< 1;
9764 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
9767 inst
.instruction
|= count
;
9771 do_vfp_sp_ldstmia (void)
9773 vfp_sp_ldstm (VFP_LDSTMIA
);
9777 do_vfp_sp_ldstmdb (void)
9779 vfp_sp_ldstm (VFP_LDSTMDB
);
9783 do_vfp_dp_ldstmia (void)
9785 vfp_dp_ldstm (VFP_LDSTMIA
);
9789 do_vfp_dp_ldstmdb (void)
9791 vfp_dp_ldstm (VFP_LDSTMDB
);
9795 do_vfp_xp_ldstmia (void)
9797 vfp_dp_ldstm (VFP_LDSTMIAX
);
9801 do_vfp_xp_ldstmdb (void)
9803 vfp_dp_ldstm (VFP_LDSTMDBX
);
9807 do_vfp_dp_rd_rm (void)
9809 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9810 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9814 do_vfp_dp_rn_rd (void)
9816 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
9817 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9821 do_vfp_dp_rd_rn (void)
9823 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9824 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9828 do_vfp_dp_rd_rn_rm (void)
9830 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9831 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9832 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
9838 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9842 do_vfp_dp_rm_rd_rn (void)
9844 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
9845 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9846 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
9849 /* VFPv3 instructions. */
9851 do_vfp_sp_const (void)
9853 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9854 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9855 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9859 do_vfp_dp_const (void)
9861 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9862 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9863 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9867 vfp_conv (int srcsize
)
9869 int immbits
= srcsize
- inst
.operands
[1].imm
;
9871 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
9873 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9874 i.e. immbits must be in range 0 - 16. */
9875 inst
.error
= _("immediate value out of range, expected range [0, 16]");
9878 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
9880 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9881 i.e. immbits must be in range 0 - 31. */
9882 inst
.error
= _("immediate value out of range, expected range [1, 32]");
9886 inst
.instruction
|= (immbits
& 1) << 5;
9887 inst
.instruction
|= (immbits
>> 1);
9891 do_vfp_sp_conv_16 (void)
9893 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9898 do_vfp_dp_conv_16 (void)
9900 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9905 do_vfp_sp_conv_32 (void)
9907 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9912 do_vfp_dp_conv_32 (void)
9914 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9918 /* FPA instructions. Also in a logical order. */
9923 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9924 inst
.instruction
|= inst
.operands
[1].reg
;
9928 do_fpa_ldmstm (void)
9930 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9931 switch (inst
.operands
[1].imm
)
9933 case 1: inst
.instruction
|= CP_T_X
; break;
9934 case 2: inst
.instruction
|= CP_T_Y
; break;
9935 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
9940 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
9942 /* The instruction specified "ea" or "fd", so we can only accept
9943 [Rn]{!}. The instruction does not really support stacking or
9944 unstacking, so we have to emulate these by setting appropriate
9945 bits and offsets. */
9946 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9947 || inst
.reloc
.exp
.X_add_number
!= 0,
9948 _("this instruction does not support indexing"));
9950 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
9951 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
9953 if (!(inst
.instruction
& INDEX_UP
))
9954 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
9956 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
9958 inst
.operands
[2].preind
= 0;
9959 inst
.operands
[2].postind
= 1;
9963 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9966 /* iWMMXt instructions: strictly in alphabetical order. */
9969 do_iwmmxt_tandorc (void)
9971 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
9975 do_iwmmxt_textrc (void)
9977 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9978 inst
.instruction
|= inst
.operands
[1].imm
;
9982 do_iwmmxt_textrm (void)
9984 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9985 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9986 inst
.instruction
|= inst
.operands
[2].imm
;
9990 do_iwmmxt_tinsr (void)
9992 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9993 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9994 inst
.instruction
|= inst
.operands
[2].imm
;
9998 do_iwmmxt_tmia (void)
10000 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10001 inst
.instruction
|= inst
.operands
[1].reg
;
10002 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10006 do_iwmmxt_waligni (void)
10008 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10009 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10010 inst
.instruction
|= inst
.operands
[2].reg
;
10011 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
10015 do_iwmmxt_wmerge (void)
10017 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10018 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10019 inst
.instruction
|= inst
.operands
[2].reg
;
10020 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
10024 do_iwmmxt_wmov (void)
10026 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10027 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10028 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10029 inst
.instruction
|= inst
.operands
[1].reg
;
10033 do_iwmmxt_wldstbh (void)
10036 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10038 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
10040 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
10041 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
10045 do_iwmmxt_wldstw (void)
10047 /* RIWR_RIWC clears .isreg for a control register. */
10048 if (!inst
.operands
[0].isreg
)
10050 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
10051 inst
.instruction
|= 0xf0000000;
10054 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10055 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
10059 do_iwmmxt_wldstd (void)
10061 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10062 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
10063 && inst
.operands
[1].immisreg
)
10065 inst
.instruction
&= ~0x1a000ff;
10066 inst
.instruction
|= (0xfU
<< 28);
10067 if (inst
.operands
[1].preind
)
10068 inst
.instruction
|= PRE_INDEX
;
10069 if (!inst
.operands
[1].negative
)
10070 inst
.instruction
|= INDEX_UP
;
10071 if (inst
.operands
[1].writeback
)
10072 inst
.instruction
|= WRITE_BACK
;
10073 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10074 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10075 inst
.instruction
|= inst
.operands
[1].imm
;
10078 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10082 do_iwmmxt_wshufh (void)
10084 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10085 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10086 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10087 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10091 do_iwmmxt_wzero (void)
10093 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10094 inst
.instruction
|= inst
.operands
[0].reg
;
10095 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10096 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10100 do_iwmmxt_wrwrwr_or_imm5 (void)
10102 if (inst
.operands
[2].isreg
)
10105 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10106 _("immediate operand requires iWMMXt2"));
10108 if (inst
.operands
[2].imm
== 0)
10110 switch ((inst
.instruction
>> 20) & 0xf)
10116 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10117 inst
.operands
[2].imm
= 16;
10118 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10124 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10125 inst
.operands
[2].imm
= 32;
10126 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10133 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10135 wrn
= (inst
.instruction
>> 16) & 0xf;
10136 inst
.instruction
&= 0xff0fff0f;
10137 inst
.instruction
|= wrn
;
10138 /* Bail out here; the instruction is now assembled. */
10143 /* Map 32 -> 0, etc. */
10144 inst
.operands
[2].imm
&= 0x1f;
10145 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10149 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10150 operations first, then control, shift, and load/store. */
10152 /* Insns like "foo X,Y,Z". */
10155 do_mav_triple (void)
10157 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10158 inst
.instruction
|= inst
.operands
[1].reg
;
10159 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10162 /* Insns like "foo W,X,Y,Z".
10163 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10168 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10169 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10170 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10171 inst
.instruction
|= inst
.operands
[3].reg
;
10174 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10176 do_mav_dspsc (void)
10178 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10181 /* Maverick shift immediate instructions.
10182 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10183 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10186 do_mav_shift (void)
10188 int imm
= inst
.operands
[2].imm
;
10190 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10191 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10193 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10194 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10195 Bit 4 should be 0. */
10196 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10198 inst
.instruction
|= imm
;
10201 /* XScale instructions. Also sorted arithmetic before move. */
10203 /* Xscale multiply-accumulate (argument parse)
10206 MIAxycc acc0,Rm,Rs. */
10211 inst
.instruction
|= inst
.operands
[1].reg
;
10212 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10215 /* Xscale move-accumulator-register (argument parse)
10217 MARcc acc0,RdLo,RdHi. */
10222 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10223 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10226 /* Xscale move-register-accumulator (argument parse)
10228 MRAcc RdLo,RdHi,acc0. */
10233 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10234 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10235 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10238 /* Encoding functions relevant only to Thumb. */
10240 /* inst.operands[i] is a shifted-register operand; encode
10241 it into inst.instruction in the format used by Thumb32. */
10244 encode_thumb32_shifted_operand (int i
)
10246 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10247 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10249 constraint (inst
.operands
[i
].immisreg
,
10250 _("shift by register not allowed in thumb mode"));
10251 inst
.instruction
|= inst
.operands
[i
].reg
;
10252 if (shift
== SHIFT_RRX
)
10253 inst
.instruction
|= SHIFT_ROR
<< 4;
10256 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10257 _("expression too complex"));
10259 constraint (value
> 32
10260 || (value
== 32 && (shift
== SHIFT_LSL
10261 || shift
== SHIFT_ROR
)),
10262 _("shift expression is too large"));
10266 else if (value
== 32)
10269 inst
.instruction
|= shift
<< 4;
10270 inst
.instruction
|= (value
& 0x1c) << 10;
10271 inst
.instruction
|= (value
& 0x03) << 6;
10276 /* inst.operands[i] was set up by parse_address. Encode it into a
10277 Thumb32 format load or store instruction. Reject forms that cannot
10278 be used with such instructions. If is_t is true, reject forms that
10279 cannot be used with a T instruction; if is_d is true, reject forms
10280 that cannot be used with a D instruction. If it is a store insn,
10281 reject PC in Rn. */
10284 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10286 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10288 constraint (!inst
.operands
[i
].isreg
,
10289 _("Instruction does not support =N addresses"));
10291 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10292 if (inst
.operands
[i
].immisreg
)
10294 constraint (is_pc
, BAD_PC_ADDRESSING
);
10295 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10296 constraint (inst
.operands
[i
].negative
,
10297 _("Thumb does not support negative register indexing"));
10298 constraint (inst
.operands
[i
].postind
,
10299 _("Thumb does not support register post-indexing"));
10300 constraint (inst
.operands
[i
].writeback
,
10301 _("Thumb does not support register indexing with writeback"));
10302 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10303 _("Thumb supports only LSL in shifted register indexing"));
10305 inst
.instruction
|= inst
.operands
[i
].imm
;
10306 if (inst
.operands
[i
].shifted
)
10308 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10309 _("expression too complex"));
10310 constraint (inst
.reloc
.exp
.X_add_number
< 0
10311 || inst
.reloc
.exp
.X_add_number
> 3,
10312 _("shift out of range"));
10313 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10315 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10317 else if (inst
.operands
[i
].preind
)
10319 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10320 constraint (is_t
&& inst
.operands
[i
].writeback
,
10321 _("cannot use writeback with this instruction"));
10322 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10323 BAD_PC_ADDRESSING
);
10327 inst
.instruction
|= 0x01000000;
10328 if (inst
.operands
[i
].writeback
)
10329 inst
.instruction
|= 0x00200000;
10333 inst
.instruction
|= 0x00000c00;
10334 if (inst
.operands
[i
].writeback
)
10335 inst
.instruction
|= 0x00000100;
10337 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10339 else if (inst
.operands
[i
].postind
)
10341 gas_assert (inst
.operands
[i
].writeback
);
10342 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10343 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10346 inst
.instruction
|= 0x00200000;
10348 inst
.instruction
|= 0x00000900;
10349 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10351 else /* unindexed - only for coprocessor */
10352 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)	     (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
10461 /* Thumb instruction encoders, in alphabetical order. */
10463 /* ADDW or SUBW. */
10466 do_t_add_sub_w (void)
10470 Rd
= inst
.operands
[0].reg
;
10471 Rn
= inst
.operands
[1].reg
;
10473 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10474 is the SP-{plus,minus}-immediate form of the instruction. */
10476 constraint (Rd
== REG_PC
, BAD_PC
);
10478 reject_bad_reg (Rd
);
10480 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10481 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10484 /* Parse an add or subtract instruction. We get here with inst.instruction
10485 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10488 do_t_add_sub (void)
10492 Rd
= inst
.operands
[0].reg
;
10493 Rs
= (inst
.operands
[1].present
10494 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10495 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10498 set_it_insn_type_last ();
10500 if (unified_syntax
)
10503 bfd_boolean narrow
;
10506 flags
= (inst
.instruction
== T_MNEM_adds
10507 || inst
.instruction
== T_MNEM_subs
);
10509 narrow
= !in_it_block ();
10511 narrow
= in_it_block ();
10512 if (!inst
.operands
[2].isreg
)
10516 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10518 add
= (inst
.instruction
== T_MNEM_add
10519 || inst
.instruction
== T_MNEM_adds
);
10521 if (inst
.size_req
!= 4)
10523 /* Attempt to use a narrow opcode, with relaxation if
10525 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10526 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10527 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10528 opcode
= T_MNEM_add_sp
;
10529 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10530 opcode
= T_MNEM_add_pc
;
10531 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10534 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10536 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10540 inst
.instruction
= THUMB_OP16(opcode
);
10541 inst
.instruction
|= (Rd
<< 4) | Rs
;
10542 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10543 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
10545 if (inst
.size_req
== 2)
10546 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10548 inst
.relax
= opcode
;
10552 constraint (inst
.size_req
== 2, BAD_HIREG
);
10554 if (inst
.size_req
== 4
10555 || (inst
.size_req
!= 2 && !opcode
))
10557 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10558 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
10559 THUMB1_RELOC_ONLY
);
10562 constraint (add
, BAD_PC
);
10563 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10564 _("only SUBS PC, LR, #const allowed"));
10565 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10566 _("expression too complex"));
10567 constraint (inst
.reloc
.exp
.X_add_number
< 0
10568 || inst
.reloc
.exp
.X_add_number
> 0xff,
10569 _("immediate value out of range"));
10570 inst
.instruction
= T2_SUBS_PC_LR
10571 | inst
.reloc
.exp
.X_add_number
;
10572 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10575 else if (Rs
== REG_PC
)
10577 /* Always use addw/subw. */
10578 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10579 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10583 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10584 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10587 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10589 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10591 inst
.instruction
|= Rd
<< 8;
10592 inst
.instruction
|= Rs
<< 16;
10597 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10598 unsigned int shift
= inst
.operands
[2].shift_kind
;
10600 Rn
= inst
.operands
[2].reg
;
10601 /* See if we can do this with a 16-bit instruction. */
10602 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10604 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10609 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10610 || inst
.instruction
== T_MNEM_add
)
10612 : T_OPCODE_SUB_R3
);
10613 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10617 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10619 /* Thumb-1 cores (except v6-M) require at least one high
10620 register in a narrow non flag setting add. */
10621 if (Rd
> 7 || Rn
> 7
10622 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10623 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10630 inst
.instruction
= T_OPCODE_ADD_HI
;
10631 inst
.instruction
|= (Rd
& 8) << 4;
10632 inst
.instruction
|= (Rd
& 7);
10633 inst
.instruction
|= Rn
<< 3;
10639 constraint (Rd
== REG_PC
, BAD_PC
);
10640 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10641 constraint (Rs
== REG_PC
, BAD_PC
);
10642 reject_bad_reg (Rn
);
10644 /* If we get here, it can't be done in 16 bits. */
10645 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10646 _("shift must be constant"));
10647 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10648 inst
.instruction
|= Rd
<< 8;
10649 inst
.instruction
|= Rs
<< 16;
10650 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10651 _("shift value over 3 not allowed in thumb mode"));
10652 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10653 _("only LSL shift allowed in thumb mode"));
10654 encode_thumb32_shifted_operand (2);
10659 constraint (inst
.instruction
== T_MNEM_adds
10660 || inst
.instruction
== T_MNEM_subs
,
10663 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10665 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10666 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10669 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10670 ? 0x0000 : 0x8000);
10671 inst
.instruction
|= (Rd
<< 4) | Rs
;
10672 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10676 Rn
= inst
.operands
[2].reg
;
10677 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10679 /* We now have Rd, Rs, and Rn set to registers. */
10680 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10682 /* Can't do this for SUB. */
10683 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10684 inst
.instruction
= T_OPCODE_ADD_HI
;
10685 inst
.instruction
|= (Rd
& 8) << 4;
10686 inst
.instruction
|= (Rd
& 7);
10688 inst
.instruction
|= Rn
<< 3;
10690 inst
.instruction
|= Rs
<< 3;
10692 constraint (1, _("dest must overlap one source register"));
10696 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10697 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
10698 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10708 Rd
= inst
.operands
[0].reg
;
10709 reject_bad_reg (Rd
);
10711 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
10713 /* Defer to section relaxation. */
10714 inst
.relax
= inst
.instruction
;
10715 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10716 inst
.instruction
|= Rd
<< 4;
10718 else if (unified_syntax
&& inst
.size_req
!= 2)
10720 /* Generate a 32-bit opcode. */
10721 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10722 inst
.instruction
|= Rd
<< 8;
10723 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
10724 inst
.reloc
.pc_rel
= 1;
10728 /* Generate a 16-bit opcode. */
10729 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10730 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10731 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
10732 inst
.reloc
.pc_rel
= 1;
10734 inst
.instruction
|= Rd
<< 4;
10738 /* Arithmetic instructions for which there is just one 16-bit
10739 instruction encoding, and it allows only two low registers.
10740 For maximal compatibility with ARM syntax, we allow three register
10741 operands even when Thumb-32 instructions are not available, as long
10742 as the first two are identical. For instance, both "sbc r0,r1" and
10743 "sbc r0,r0,r1" are allowed. */
10749 Rd
= inst
.operands
[0].reg
;
10750 Rs
= (inst
.operands
[1].present
10751 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10752 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10753 Rn
= inst
.operands
[2].reg
;
10755 reject_bad_reg (Rd
);
10756 reject_bad_reg (Rs
);
10757 if (inst
.operands
[2].isreg
)
10758 reject_bad_reg (Rn
);
10760 if (unified_syntax
)
10762 if (!inst
.operands
[2].isreg
)
10764 /* For an immediate, we always generate a 32-bit opcode;
10765 section relaxation will shrink it later if possible. */
10766 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10767 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10768 inst
.instruction
|= Rd
<< 8;
10769 inst
.instruction
|= Rs
<< 16;
10770 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10774 bfd_boolean narrow
;
10776 /* See if we can do this with a 16-bit instruction. */
10777 if (THUMB_SETS_FLAGS (inst
.instruction
))
10778 narrow
= !in_it_block ();
10780 narrow
= in_it_block ();
10782 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10784 if (inst
.operands
[2].shifted
)
10786 if (inst
.size_req
== 4)
10792 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10793 inst
.instruction
|= Rd
;
10794 inst
.instruction
|= Rn
<< 3;
10798 /* If we get here, it can't be done in 16 bits. */
10799 constraint (inst
.operands
[2].shifted
10800 && inst
.operands
[2].immisreg
,
10801 _("shift must be constant"));
10802 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10803 inst
.instruction
|= Rd
<< 8;
10804 inst
.instruction
|= Rs
<< 16;
10805 encode_thumb32_shifted_operand (2);
10810 /* On its face this is a lie - the instruction does set the
10811 flags. However, the only supported mnemonic in this mode
10812 says it doesn't. */
10813 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10815 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10816 _("unshifted register required"));
10817 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10818 constraint (Rd
!= Rs
,
10819 _("dest and source1 must be the same register"));
10821 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10822 inst
.instruction
|= Rd
;
10823 inst
.instruction
|= Rn
<< 3;
10827 /* Similarly, but for instructions where the arithmetic operation is
10828 commutative, so we can allow either of them to be different from
10829 the destination operand in a 16-bit instruction. For instance, all
10830 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10837 Rd
= inst
.operands
[0].reg
;
10838 Rs
= (inst
.operands
[1].present
10839 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10840 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10841 Rn
= inst
.operands
[2].reg
;
10843 reject_bad_reg (Rd
);
10844 reject_bad_reg (Rs
);
10845 if (inst
.operands
[2].isreg
)
10846 reject_bad_reg (Rn
);
10848 if (unified_syntax
)
10850 if (!inst
.operands
[2].isreg
)
10852 /* For an immediate, we always generate a 32-bit opcode;
10853 section relaxation will shrink it later if possible. */
10854 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10855 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10856 inst
.instruction
|= Rd
<< 8;
10857 inst
.instruction
|= Rs
<< 16;
10858 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10862 bfd_boolean narrow
;
10864 /* See if we can do this with a 16-bit instruction. */
10865 if (THUMB_SETS_FLAGS (inst
.instruction
))
10866 narrow
= !in_it_block ();
10868 narrow
= in_it_block ();
10870 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10872 if (inst
.operands
[2].shifted
)
10874 if (inst
.size_req
== 4)
10881 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10882 inst
.instruction
|= Rd
;
10883 inst
.instruction
|= Rn
<< 3;
10888 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10889 inst
.instruction
|= Rd
;
10890 inst
.instruction
|= Rs
<< 3;
10895 /* If we get here, it can't be done in 16 bits. */
10896 constraint (inst
.operands
[2].shifted
10897 && inst
.operands
[2].immisreg
,
10898 _("shift must be constant"));
10899 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10900 inst
.instruction
|= Rd
<< 8;
10901 inst
.instruction
|= Rs
<< 16;
10902 encode_thumb32_shifted_operand (2);
10907 /* On its face this is a lie - the instruction does set the
10908 flags. However, the only supported mnemonic in this mode
10909 says it doesn't. */
10910 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10912 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10913 _("unshifted register required"));
10914 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10916 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10917 inst
.instruction
|= Rd
;
10920 inst
.instruction
|= Rn
<< 3;
10922 inst
.instruction
|= Rs
<< 3;
10924 constraint (1, _("dest must overlap one source register"));
10932 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
10933 constraint (msb
> 32, _("bit-field extends past end of register"));
10934 /* The instruction encoding stores the LSB and MSB,
10935 not the LSB and width. */
10936 Rd
= inst
.operands
[0].reg
;
10937 reject_bad_reg (Rd
);
10938 inst
.instruction
|= Rd
<< 8;
10939 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
10940 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
10941 inst
.instruction
|= msb
- 1;
10950 Rd
= inst
.operands
[0].reg
;
10951 reject_bad_reg (Rd
);
10953 /* #0 in second position is alternative syntax for bfc, which is
10954 the same instruction but with REG_PC in the Rm field. */
10955 if (!inst
.operands
[1].isreg
)
10959 Rn
= inst
.operands
[1].reg
;
10960 reject_bad_reg (Rn
);
10963 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
10964 constraint (msb
> 32, _("bit-field extends past end of register"));
10965 /* The instruction encoding stores the LSB and MSB,
10966 not the LSB and width. */
10967 inst
.instruction
|= Rd
<< 8;
10968 inst
.instruction
|= Rn
<< 16;
10969 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10970 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10971 inst
.instruction
|= msb
- 1;
10979 Rd
= inst
.operands
[0].reg
;
10980 Rn
= inst
.operands
[1].reg
;
10982 reject_bad_reg (Rd
);
10983 reject_bad_reg (Rn
);
10985 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
10986 _("bit-field extends past end of register"));
10987 inst
.instruction
|= Rd
<< 8;
10988 inst
.instruction
|= Rn
<< 16;
10989 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10990 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10991 inst
.instruction
|= inst
.operands
[3].imm
- 1;
10994 /* ARM V5 Thumb BLX (argument parse)
10995 BLX <target_addr> which is BLX(1)
10996 BLX <Rm> which is BLX(2)
10997 Unfortunately, there are two different opcodes for this mnemonic.
10998 So, the insns[].value is not used, and the code here zaps values
10999 into inst.instruction.
11001 ??? How to take advantage of the additional two bits of displacement
11002 available in Thumb32 mode? Need new relocation? */
11007 set_it_insn_type_last ();
11009 if (inst
.operands
[0].isreg
)
11011 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11012 /* We have a register, so this is BLX(2). */
11013 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11017 /* No register. This must be BLX(1). */
11018 inst
.instruction
= 0xf000e800;
11019 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11028 bfd_reloc_code_real_type reloc
;
11031 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
11033 if (in_it_block ())
11035 /* Conditional branches inside IT blocks are encoded as unconditional
11037 cond
= COND_ALWAYS
;
11042 if (cond
!= COND_ALWAYS
)
11043 opcode
= T_MNEM_bcond
;
11045 opcode
= inst
.instruction
;
11048 && (inst
.size_req
== 4
11049 || (inst
.size_req
!= 2
11050 && (inst
.operands
[0].hasreloc
11051 || inst
.reloc
.exp
.X_op
== O_constant
))))
11053 inst
.instruction
= THUMB_OP32(opcode
);
11054 if (cond
== COND_ALWAYS
)
11055 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11058 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11059 _("selected architecture does not support "
11060 "wide conditional branch instruction"));
11062 gas_assert (cond
!= 0xF);
11063 inst
.instruction
|= cond
<< 22;
11064 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11069 inst
.instruction
= THUMB_OP16(opcode
);
11070 if (cond
== COND_ALWAYS
)
11071 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11074 inst
.instruction
|= cond
<< 8;
11075 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11077 /* Allow section relaxation. */
11078 if (unified_syntax
&& inst
.size_req
!= 2)
11079 inst
.relax
= opcode
;
11081 inst
.reloc
.type
= reloc
;
11082 inst
.reloc
.pc_rel
= 1;
11085 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11086 between the two is the maximum immediate allowed - which is passed in
11089 do_t_bkpt_hlt1 (int range
)
11091 constraint (inst
.cond
!= COND_ALWAYS
,
11092 _("instruction is always unconditional"));
11093 if (inst
.operands
[0].present
)
11095 constraint (inst
.operands
[0].imm
> range
,
11096 _("immediate value out of range"));
11097 inst
.instruction
|= inst
.operands
[0].imm
;
11100 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Thumb HLT: immediate limited to 6 bits.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
/* Thumb BKPT: immediate limited to 8 bits.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11116 do_t_branch23 (void)
11118 set_it_insn_type_last ();
11119 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11121 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11122 this file. We used to simply ignore the PLT reloc type here --
11123 the branch encoding is now needed to deal with TLSCALL relocs.
11124 So if we see a PLT reloc now, put it back to how it used to be to
11125 keep the preexisting behaviour. */
11126 if (inst
.reloc
.type
== BFD_RELOC_ARM_PLT32
)
11127 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11129 #if defined(OBJ_COFF)
11130 /* If the destination of the branch is a defined symbol which does not have
11131 the THUMB_FUNC attribute, then we must be calling a function which has
11132 the (interfacearm) attribute. We look for the Thumb entry point to that
11133 function and change the branch to refer to that function instead. */
11134 if ( inst
.reloc
.exp
.X_op
== O_symbol
11135 && inst
.reloc
.exp
.X_add_symbol
!= NULL
11136 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
11137 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
11138 inst
.reloc
.exp
.X_add_symbol
=
11139 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
11146 set_it_insn_type_last ();
11147 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11148 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11149 should cause the alignment to be checked once it is known. This is
11150 because BX PC only works if the instruction is word aligned. */
11158 set_it_insn_type_last ();
11159 Rm
= inst
.operands
[0].reg
;
11160 reject_bad_reg (Rm
);
11161 inst
.instruction
|= Rm
<< 16;
11170 Rd
= inst
.operands
[0].reg
;
11171 Rm
= inst
.operands
[1].reg
;
11173 reject_bad_reg (Rd
);
11174 reject_bad_reg (Rm
);
11176 inst
.instruction
|= Rd
<< 8;
11177 inst
.instruction
|= Rm
<< 16;
11178 inst
.instruction
|= Rm
;
11184 set_it_insn_type (OUTSIDE_IT_INSN
);
11185 inst
.instruction
|= inst
.operands
[0].imm
;
11191 set_it_insn_type (OUTSIDE_IT_INSN
);
11193 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11194 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11196 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11197 inst
.instruction
= 0xf3af8000;
11198 inst
.instruction
|= imod
<< 9;
11199 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11200 if (inst
.operands
[1].present
)
11201 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11205 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11206 && (inst
.operands
[0].imm
& 4),
11207 _("selected processor does not support 'A' form "
11208 "of this instruction"));
11209 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11210 _("Thumb does not support the 2-argument "
11211 "form of this instruction"));
11212 inst
.instruction
|= inst
.operands
[0].imm
;
11216 /* THUMB CPY instruction (argument parse). */
11221 if (inst
.size_req
== 4)
11223 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11224 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11225 inst
.instruction
|= inst
.operands
[1].reg
;
11229 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11230 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11231 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11238 set_it_insn_type (OUTSIDE_IT_INSN
);
11239 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11240 inst
.instruction
|= inst
.operands
[0].reg
;
11241 inst
.reloc
.pc_rel
= 1;
11242 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11248 inst
.instruction
|= inst
.operands
[0].imm
;
11254 unsigned Rd
, Rn
, Rm
;
11256 Rd
= inst
.operands
[0].reg
;
11257 Rn
= (inst
.operands
[1].present
11258 ? inst
.operands
[1].reg
: Rd
);
11259 Rm
= inst
.operands
[2].reg
;
11261 reject_bad_reg (Rd
);
11262 reject_bad_reg (Rn
);
11263 reject_bad_reg (Rm
);
11265 inst
.instruction
|= Rd
<< 8;
11266 inst
.instruction
|= Rn
<< 16;
11267 inst
.instruction
|= Rm
;
11273 if (unified_syntax
&& inst
.size_req
== 4)
11274 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11276 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11282 unsigned int cond
= inst
.operands
[0].imm
;
11284 set_it_insn_type (IT_INSN
);
11285 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11287 now_it
.warn_deprecated
= FALSE
;
11289 /* If the condition is a negative condition, invert the mask. */
11290 if ((cond
& 0x1) == 0x0)
11292 unsigned int mask
= inst
.instruction
& 0x000f;
11294 if ((mask
& 0x7) == 0)
11296 /* No conversion needed. */
11297 now_it
.block_length
= 1;
11299 else if ((mask
& 0x3) == 0)
11302 now_it
.block_length
= 2;
11304 else if ((mask
& 0x1) == 0)
11307 now_it
.block_length
= 3;
11312 now_it
.block_length
= 4;
11315 inst
.instruction
&= 0xfff0;
11316 inst
.instruction
|= mask
;
11319 inst
.instruction
|= cond
<< 4;
11322 /* Helper function used for both push/pop and ldm/stm. */
11324 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
11328 load
= (inst
.instruction
& (1 << 20)) != 0;
11330 if (mask
& (1 << 13))
11331 inst
.error
= _("SP not allowed in register list");
11333 if ((mask
& (1 << base
)) != 0
11335 inst
.error
= _("having the base register in the register list when "
11336 "using write back is UNPREDICTABLE");
11340 if (mask
& (1 << 15))
11342 if (mask
& (1 << 14))
11343 inst
.error
= _("LR and PC should not both be in register list");
11345 set_it_insn_type_last ();
11350 if (mask
& (1 << 15))
11351 inst
.error
= _("PC not allowed in register list");
11354 if ((mask
& (mask
- 1)) == 0)
11356 /* Single register transfers implemented as str/ldr. */
11359 if (inst
.instruction
& (1 << 23))
11360 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11362 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11366 if (inst
.instruction
& (1 << 23))
11367 inst
.instruction
= 0x00800000; /* ia -> [base] */
11369 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11372 inst
.instruction
|= 0xf8400000;
11374 inst
.instruction
|= 0x00100000;
11376 mask
= ffs (mask
) - 1;
11379 else if (writeback
)
11380 inst
.instruction
|= WRITE_BACK
;
11382 inst
.instruction
|= mask
;
11383 inst
.instruction
|= base
<< 16;
11389 /* This really doesn't seem worth it. */
11390 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
11391 _("expression too complex"));
11392 constraint (inst
.operands
[1].writeback
,
11393 _("Thumb load/store multiple does not support {reglist}^"));
11395 if (unified_syntax
)
11397 bfd_boolean narrow
;
11401 /* See if we can use a 16-bit instruction. */
11402 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11403 && inst
.size_req
!= 4
11404 && !(inst
.operands
[1].imm
& ~0xff))
11406 mask
= 1 << inst
.operands
[0].reg
;
11408 if (inst
.operands
[0].reg
<= 7)
11410 if (inst
.instruction
== T_MNEM_stmia
11411 ? inst
.operands
[0].writeback
11412 : (inst
.operands
[0].writeback
11413 == !(inst
.operands
[1].imm
& mask
)))
11415 if (inst
.instruction
== T_MNEM_stmia
11416 && (inst
.operands
[1].imm
& mask
)
11417 && (inst
.operands
[1].imm
& (mask
- 1)))
11418 as_warn (_("value stored for r%d is UNKNOWN"),
11419 inst
.operands
[0].reg
);
11421 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11422 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11423 inst
.instruction
|= inst
.operands
[1].imm
;
11426 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11428 /* This means 1 register in reg list one of 3 situations:
11429 1. Instruction is stmia, but without writeback.
11430 2. lmdia without writeback, but with Rn not in
11432 3. ldmia with writeback, but with Rn in reglist.
11433 Case 3 is UNPREDICTABLE behaviour, so we handle
11434 case 1 and 2 which can be converted into a 16-bit
11435 str or ldr. The SP cases are handled below. */
11436 unsigned long opcode
;
11437 /* First, record an error for Case 3. */
11438 if (inst
.operands
[1].imm
& mask
11439 && inst
.operands
[0].writeback
)
11441 _("having the base register in the register list when "
11442 "using write back is UNPREDICTABLE");
11444 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11446 inst
.instruction
= THUMB_OP16 (opcode
);
11447 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11448 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11452 else if (inst
.operands
[0] .reg
== REG_SP
)
11454 if (inst
.operands
[0].writeback
)
11457 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11458 ? T_MNEM_push
: T_MNEM_pop
);
11459 inst
.instruction
|= inst
.operands
[1].imm
;
11462 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11465 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11466 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11467 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11475 if (inst
.instruction
< 0xffff)
11476 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11478 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
11479 inst
.operands
[0].writeback
);
11484 constraint (inst
.operands
[0].reg
> 7
11485 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11486 constraint (inst
.instruction
!= T_MNEM_ldmia
11487 && inst
.instruction
!= T_MNEM_stmia
,
11488 _("Thumb-2 instruction only valid in unified syntax"));
11489 if (inst
.instruction
== T_MNEM_stmia
)
11491 if (!inst
.operands
[0].writeback
)
11492 as_warn (_("this instruction will write back the base register"));
11493 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11494 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11495 as_warn (_("value stored for r%d is UNKNOWN"),
11496 inst
.operands
[0].reg
);
11500 if (!inst
.operands
[0].writeback
11501 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11502 as_warn (_("this instruction will write back the base register"));
11503 else if (inst
.operands
[0].writeback
11504 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11505 as_warn (_("this instruction will not write back the base register"));
11508 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11509 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11510 inst
.instruction
|= inst
.operands
[1].imm
;
11517 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11518 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11519 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11520 || inst
.operands
[1].negative
,
11523 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11525 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11526 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11527 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11533 if (!inst
.operands
[1].present
)
11535 constraint (inst
.operands
[0].reg
== REG_LR
,
11536 _("r14 not allowed as first register "
11537 "when second register is omitted"));
11538 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11540 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11543 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11544 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11545 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11551 unsigned long opcode
;
11554 if (inst
.operands
[0].isreg
11555 && !inst
.operands
[0].preind
11556 && inst
.operands
[0].reg
== REG_PC
)
11557 set_it_insn_type_last ();
11559 opcode
= inst
.instruction
;
11560 if (unified_syntax
)
11562 if (!inst
.operands
[1].isreg
)
11564 if (opcode
<= 0xffff)
11565 inst
.instruction
= THUMB_OP32 (opcode
);
11566 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11569 if (inst
.operands
[1].isreg
11570 && !inst
.operands
[1].writeback
11571 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11572 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11573 && opcode
<= 0xffff
11574 && inst
.size_req
!= 4)
11576 /* Insn may have a 16-bit form. */
11577 Rn
= inst
.operands
[1].reg
;
11578 if (inst
.operands
[1].immisreg
)
11580 inst
.instruction
= THUMB_OP16 (opcode
);
11582 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11584 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11585 reject_bad_reg (inst
.operands
[1].imm
);
11587 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11588 && opcode
!= T_MNEM_ldrsb
)
11589 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11590 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11597 if (inst
.reloc
.pc_rel
)
11598 opcode
= T_MNEM_ldr_pc2
;
11600 opcode
= T_MNEM_ldr_pc
;
11604 if (opcode
== T_MNEM_ldr
)
11605 opcode
= T_MNEM_ldr_sp
;
11607 opcode
= T_MNEM_str_sp
;
11609 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11613 inst
.instruction
= inst
.operands
[0].reg
;
11614 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11616 inst
.instruction
|= THUMB_OP16 (opcode
);
11617 if (inst
.size_req
== 2)
11618 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11620 inst
.relax
= opcode
;
11624 /* Definitely a 32-bit variant. */
11626 /* Warning for Erratum 752419. */
11627 if (opcode
== T_MNEM_ldr
11628 && inst
.operands
[0].reg
== REG_SP
11629 && inst
.operands
[1].writeback
== 1
11630 && !inst
.operands
[1].immisreg
)
11632 if (no_cpu_selected ()
11633 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11634 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11635 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11636 as_warn (_("This instruction may be unpredictable "
11637 "if executed on M-profile cores "
11638 "with interrupts enabled."));
11641 /* Do some validations regarding addressing modes. */
11642 if (inst
.operands
[1].immisreg
)
11643 reject_bad_reg (inst
.operands
[1].imm
);
11645 constraint (inst
.operands
[1].writeback
== 1
11646 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11649 inst
.instruction
= THUMB_OP32 (opcode
);
11650 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11651 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11652 check_ldr_r15_aligned ();
11656 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11658 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11660 /* Only [Rn,Rm] is acceptable. */
11661 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11662 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11663 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11664 || inst
.operands
[1].negative
,
11665 _("Thumb does not support this addressing mode"));
11666 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11670 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11671 if (!inst
.operands
[1].isreg
)
11672 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11675 constraint (!inst
.operands
[1].preind
11676 || inst
.operands
[1].shifted
11677 || inst
.operands
[1].writeback
,
11678 _("Thumb does not support this addressing mode"));
11679 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
11681 constraint (inst
.instruction
& 0x0600,
11682 _("byte or halfword not valid for base register"));
11683 constraint (inst
.operands
[1].reg
== REG_PC
11684 && !(inst
.instruction
& THUMB_LOAD_BIT
),
11685 _("r15 based store not allowed"));
11686 constraint (inst
.operands
[1].immisreg
,
11687 _("invalid base register for register offset"));
11689 if (inst
.operands
[1].reg
== REG_PC
)
11690 inst
.instruction
= T_OPCODE_LDR_PC
;
11691 else if (inst
.instruction
& THUMB_LOAD_BIT
)
11692 inst
.instruction
= T_OPCODE_LDR_SP
;
11694 inst
.instruction
= T_OPCODE_STR_SP
;
11696 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11697 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11701 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
11702 if (!inst
.operands
[1].immisreg
)
11704 /* Immediate offset. */
11705 inst
.instruction
|= inst
.operands
[0].reg
;
11706 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11707 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11711 /* Register offset. */
11712 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
11713 constraint (inst
.operands
[1].negative
,
11714 _("Thumb does not support this addressing mode"));
11717 switch (inst
.instruction
)
11719 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
11720 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
11721 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
11722 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
11723 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
11724 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
11725 case 0x5600 /* ldrsb */:
11726 case 0x5e00 /* ldrsh */: break;
11730 inst
.instruction
|= inst
.operands
[0].reg
;
11731 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11732 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
11738 if (!inst
.operands
[1].present
)
11740 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11741 constraint (inst
.operands
[0].reg
== REG_LR
,
11742 _("r14 not allowed here"));
11743 constraint (inst
.operands
[0].reg
== REG_R12
,
11744 _("r12 not allowed here"));
11747 if (inst
.operands
[2].writeback
11748 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
11749 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
11750 as_warn (_("base register written back, and overlaps "
11751 "one of transfer registers"));
11753 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11754 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11755 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
11761 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11762 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
11768 unsigned Rd
, Rn
, Rm
, Ra
;
11770 Rd
= inst
.operands
[0].reg
;
11771 Rn
= inst
.operands
[1].reg
;
11772 Rm
= inst
.operands
[2].reg
;
11773 Ra
= inst
.operands
[3].reg
;
11775 reject_bad_reg (Rd
);
11776 reject_bad_reg (Rn
);
11777 reject_bad_reg (Rm
);
11778 reject_bad_reg (Ra
);
11780 inst
.instruction
|= Rd
<< 8;
11781 inst
.instruction
|= Rn
<< 16;
11782 inst
.instruction
|= Rm
;
11783 inst
.instruction
|= Ra
<< 12;
11789 unsigned RdLo
, RdHi
, Rn
, Rm
;
11791 RdLo
= inst
.operands
[0].reg
;
11792 RdHi
= inst
.operands
[1].reg
;
11793 Rn
= inst
.operands
[2].reg
;
11794 Rm
= inst
.operands
[3].reg
;
11796 reject_bad_reg (RdLo
);
11797 reject_bad_reg (RdHi
);
11798 reject_bad_reg (Rn
);
11799 reject_bad_reg (Rm
);
11801 inst
.instruction
|= RdLo
<< 12;
11802 inst
.instruction
|= RdHi
<< 8;
11803 inst
.instruction
|= Rn
<< 16;
11804 inst
.instruction
|= Rm
;
11808 do_t_mov_cmp (void)
11812 Rn
= inst
.operands
[0].reg
;
11813 Rm
= inst
.operands
[1].reg
;
11816 set_it_insn_type_last ();
11818 if (unified_syntax
)
11820 int r0off
= (inst
.instruction
== T_MNEM_mov
11821 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
11822 unsigned long opcode
;
11823 bfd_boolean narrow
;
11824 bfd_boolean low_regs
;
11826 low_regs
= (Rn
<= 7 && Rm
<= 7);
11827 opcode
= inst
.instruction
;
11828 if (in_it_block ())
11829 narrow
= opcode
!= T_MNEM_movs
;
11831 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
11832 if (inst
.size_req
== 4
11833 || inst
.operands
[1].shifted
)
11836 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11837 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
11838 && !inst
.operands
[1].shifted
11842 inst
.instruction
= T2_SUBS_PC_LR
;
11846 if (opcode
== T_MNEM_cmp
)
11848 constraint (Rn
== REG_PC
, BAD_PC
);
11851 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11853 warn_deprecated_sp (Rm
);
11854 /* R15 was documented as a valid choice for Rm in ARMv6,
11855 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11856 tools reject R15, so we do too. */
11857 constraint (Rm
== REG_PC
, BAD_PC
);
11860 reject_bad_reg (Rm
);
11862 else if (opcode
== T_MNEM_mov
11863 || opcode
== T_MNEM_movs
)
11865 if (inst
.operands
[1].isreg
)
11867 if (opcode
== T_MNEM_movs
)
11869 reject_bad_reg (Rn
);
11870 reject_bad_reg (Rm
);
11874 /* This is mov.n. */
11875 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
11876 && (Rm
== REG_SP
|| Rm
== REG_PC
))
11878 as_tsktsk (_("Use of r%u as a source register is "
11879 "deprecated when r%u is the destination "
11880 "register."), Rm
, Rn
);
11885 /* This is mov.w. */
11886 constraint (Rn
== REG_PC
, BAD_PC
);
11887 constraint (Rm
== REG_PC
, BAD_PC
);
11888 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
11892 reject_bad_reg (Rn
);
11895 if (!inst
.operands
[1].isreg
)
11897 /* Immediate operand. */
11898 if (!in_it_block () && opcode
== T_MNEM_mov
)
11900 if (low_regs
&& narrow
)
11902 inst
.instruction
= THUMB_OP16 (opcode
);
11903 inst
.instruction
|= Rn
<< 8;
11904 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11905 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
11907 if (inst
.size_req
== 2)
11908 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11910 inst
.relax
= opcode
;
11915 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11916 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
11917 THUMB1_RELOC_ONLY
);
11919 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11920 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11921 inst
.instruction
|= Rn
<< r0off
;
11922 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11925 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
11926 && (inst
.instruction
== T_MNEM_mov
11927 || inst
.instruction
== T_MNEM_movs
))
11929 /* Register shifts are encoded as separate shift instructions. */
11930 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
11932 if (in_it_block ())
11937 if (inst
.size_req
== 4)
11940 if (!low_regs
|| inst
.operands
[1].imm
> 7)
11946 switch (inst
.operands
[1].shift_kind
)
11949 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
11952 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
11955 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
11958 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
11964 inst
.instruction
= opcode
;
11967 inst
.instruction
|= Rn
;
11968 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
11973 inst
.instruction
|= CONDS_BIT
;
11975 inst
.instruction
|= Rn
<< 8;
11976 inst
.instruction
|= Rm
<< 16;
11977 inst
.instruction
|= inst
.operands
[1].imm
;
11982 /* Some mov with immediate shift have narrow variants.
11983 Register shifts are handled above. */
11984 if (low_regs
&& inst
.operands
[1].shifted
11985 && (inst
.instruction
== T_MNEM_mov
11986 || inst
.instruction
== T_MNEM_movs
))
11988 if (in_it_block ())
11989 narrow
= (inst
.instruction
== T_MNEM_mov
);
11991 narrow
= (inst
.instruction
== T_MNEM_movs
);
11996 switch (inst
.operands
[1].shift_kind
)
11998 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11999 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12000 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12001 default: narrow
= FALSE
; break;
12007 inst
.instruction
|= Rn
;
12008 inst
.instruction
|= Rm
<< 3;
12009 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12013 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12014 inst
.instruction
|= Rn
<< r0off
;
12015 encode_thumb32_shifted_operand (1);
12019 switch (inst
.instruction
)
12022 /* In v4t or v5t a move of two lowregs produces unpredictable
12023 results. Don't allow this. */
12026 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12027 "MOV Rd, Rs with two low registers is not "
12028 "permitted on this architecture");
12029 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12033 inst
.instruction
= T_OPCODE_MOV_HR
;
12034 inst
.instruction
|= (Rn
& 0x8) << 4;
12035 inst
.instruction
|= (Rn
& 0x7);
12036 inst
.instruction
|= Rm
<< 3;
12040 /* We know we have low registers at this point.
12041 Generate LSLS Rd, Rs, #0. */
12042 inst
.instruction
= T_OPCODE_LSL_I
;
12043 inst
.instruction
|= Rn
;
12044 inst
.instruction
|= Rm
<< 3;
12050 inst
.instruction
= T_OPCODE_CMP_LR
;
12051 inst
.instruction
|= Rn
;
12052 inst
.instruction
|= Rm
<< 3;
12056 inst
.instruction
= T_OPCODE_CMP_HR
;
12057 inst
.instruction
|= (Rn
& 0x8) << 4;
12058 inst
.instruction
|= (Rn
& 0x7);
12059 inst
.instruction
|= Rm
<< 3;
12066 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12068 /* PR 10443: Do not silently ignore shifted operands. */
12069 constraint (inst
.operands
[1].shifted
,
12070 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12072 if (inst
.operands
[1].isreg
)
12074 if (Rn
< 8 && Rm
< 8)
12076 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12077 since a MOV instruction produces unpredictable results. */
12078 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12079 inst
.instruction
= T_OPCODE_ADD_I3
;
12081 inst
.instruction
= T_OPCODE_CMP_LR
;
12083 inst
.instruction
|= Rn
;
12084 inst
.instruction
|= Rm
<< 3;
12088 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12089 inst
.instruction
= T_OPCODE_MOV_HR
;
12091 inst
.instruction
= T_OPCODE_CMP_HR
;
12097 constraint (Rn
> 7,
12098 _("only lo regs allowed with immediate"));
12099 inst
.instruction
|= Rn
<< 8;
12100 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
12111 top
= (inst
.instruction
& 0x00800000) != 0;
12112 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
12114 constraint (top
, _(":lower16: not allowed this instruction"));
12115 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
12117 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
12119 constraint (!top
, _(":upper16: not allowed this instruction"));
12120 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
12123 Rd
= inst
.operands
[0].reg
;
12124 reject_bad_reg (Rd
);
12126 inst
.instruction
|= Rd
<< 8;
12127 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
12129 imm
= inst
.reloc
.exp
.X_add_number
;
12130 inst
.instruction
|= (imm
& 0xf000) << 4;
12131 inst
.instruction
|= (imm
& 0x0800) << 15;
12132 inst
.instruction
|= (imm
& 0x0700) << 4;
12133 inst
.instruction
|= (imm
& 0x00ff);
12138 do_t_mvn_tst (void)
12142 Rn
= inst
.operands
[0].reg
;
12143 Rm
= inst
.operands
[1].reg
;
12145 if (inst
.instruction
== T_MNEM_cmp
12146 || inst
.instruction
== T_MNEM_cmn
)
12147 constraint (Rn
== REG_PC
, BAD_PC
);
12149 reject_bad_reg (Rn
);
12150 reject_bad_reg (Rm
);
12152 if (unified_syntax
)
12154 int r0off
= (inst
.instruction
== T_MNEM_mvn
12155 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12156 bfd_boolean narrow
;
12158 if (inst
.size_req
== 4
12159 || inst
.instruction
> 0xffff
12160 || inst
.operands
[1].shifted
12161 || Rn
> 7 || Rm
> 7)
12163 else if (inst
.instruction
== T_MNEM_cmn
12164 || inst
.instruction
== T_MNEM_tst
)
12166 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12167 narrow
= !in_it_block ();
12169 narrow
= in_it_block ();
12171 if (!inst
.operands
[1].isreg
)
12173 /* For an immediate, we always generate a 32-bit opcode;
12174 section relaxation will shrink it later if possible. */
12175 if (inst
.instruction
< 0xffff)
12176 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12177 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12178 inst
.instruction
|= Rn
<< r0off
;
12179 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12183 /* See if we can do this with a 16-bit instruction. */
12186 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12187 inst
.instruction
|= Rn
;
12188 inst
.instruction
|= Rm
<< 3;
12192 constraint (inst
.operands
[1].shifted
12193 && inst
.operands
[1].immisreg
,
12194 _("shift must be constant"));
12195 if (inst
.instruction
< 0xffff)
12196 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12197 inst
.instruction
|= Rn
<< r0off
;
12198 encode_thumb32_shifted_operand (1);
12204 constraint (inst
.instruction
> 0xffff
12205 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12206 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12207 _("unshifted register required"));
12208 constraint (Rn
> 7 || Rm
> 7,
12211 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12212 inst
.instruction
|= Rn
;
12213 inst
.instruction
|= Rm
<< 3;
12222 if (do_vfp_nsyn_mrs () == SUCCESS
)
12225 Rd
= inst
.operands
[0].reg
;
12226 reject_bad_reg (Rd
);
12227 inst
.instruction
|= Rd
<< 8;
12229 if (inst
.operands
[1].isreg
)
12231 unsigned br
= inst
.operands
[1].reg
;
12232 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12233 as_bad (_("bad register for mrs"));
12235 inst
.instruction
|= br
& (0xf << 16);
12236 inst
.instruction
|= (br
& 0x300) >> 4;
12237 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12241 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12243 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12245 /* PR gas/12698: The constraint is only applied for m_profile.
12246 If the user has specified -march=all, we want to ignore it as
12247 we are building for any CPU type, including non-m variants. */
12248 bfd_boolean m_profile
=
12249 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12250 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12251 "not support requested special purpose register"));
12254 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12256 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12257 _("'APSR', 'CPSR' or 'SPSR' expected"));
12259 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12260 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12261 inst
.instruction
|= 0xf0000;
12271 if (do_vfp_nsyn_msr () == SUCCESS
)
12274 constraint (!inst
.operands
[1].isreg
,
12275 _("Thumb encoding does not support an immediate here"));
12277 if (inst
.operands
[0].isreg
)
12278 flags
= (int)(inst
.operands
[0].reg
);
12280 flags
= inst
.operands
[0].imm
;
12282 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12284 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12286 /* PR gas/12698: The constraint is only applied for m_profile.
12287 If the user has specified -march=all, we want to ignore it as
12288 we are building for any CPU type, including non-m variants. */
12289 bfd_boolean m_profile
=
12290 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12291 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12292 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12293 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12294 && bits
!= PSR_f
)) && m_profile
,
12295 _("selected processor does not support requested special "
12296 "purpose register"));
12299 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12300 "requested special purpose register"));
12302 Rn
= inst
.operands
[1].reg
;
12303 reject_bad_reg (Rn
);
12305 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12306 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12307 inst
.instruction
|= (flags
& 0x300) >> 4;
12308 inst
.instruction
|= (flags
& 0xff);
12309 inst
.instruction
|= Rn
<< 16;
12315 bfd_boolean narrow
;
12316 unsigned Rd
, Rn
, Rm
;
12318 if (!inst
.operands
[2].present
)
12319 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12321 Rd
= inst
.operands
[0].reg
;
12322 Rn
= inst
.operands
[1].reg
;
12323 Rm
= inst
.operands
[2].reg
;
12325 if (unified_syntax
)
12327 if (inst
.size_req
== 4
12333 else if (inst
.instruction
== T_MNEM_muls
)
12334 narrow
= !in_it_block ();
12336 narrow
= in_it_block ();
12340 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12341 constraint (Rn
> 7 || Rm
> 7,
12348 /* 16-bit MULS/Conditional MUL. */
12349 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12350 inst
.instruction
|= Rd
;
12353 inst
.instruction
|= Rm
<< 3;
12355 inst
.instruction
|= Rn
<< 3;
12357 constraint (1, _("dest must overlap one source register"));
12361 constraint (inst
.instruction
!= T_MNEM_mul
,
12362 _("Thumb-2 MUL must not set flags"));
12364 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12365 inst
.instruction
|= Rd
<< 8;
12366 inst
.instruction
|= Rn
<< 16;
12367 inst
.instruction
|= Rm
<< 0;
12369 reject_bad_reg (Rd
);
12370 reject_bad_reg (Rn
);
12371 reject_bad_reg (Rm
);
12378 unsigned RdLo
, RdHi
, Rn
, Rm
;
12380 RdLo
= inst
.operands
[0].reg
;
12381 RdHi
= inst
.operands
[1].reg
;
12382 Rn
= inst
.operands
[2].reg
;
12383 Rm
= inst
.operands
[3].reg
;
12385 reject_bad_reg (RdLo
);
12386 reject_bad_reg (RdHi
);
12387 reject_bad_reg (Rn
);
12388 reject_bad_reg (Rm
);
12390 inst
.instruction
|= RdLo
<< 12;
12391 inst
.instruction
|= RdHi
<< 8;
12392 inst
.instruction
|= Rn
<< 16;
12393 inst
.instruction
|= Rm
;
12396 as_tsktsk (_("rdhi and rdlo must be different"));
12402 set_it_insn_type (NEUTRAL_IT_INSN
);
12404 if (unified_syntax
)
12406 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12408 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12409 inst
.instruction
|= inst
.operands
[0].imm
;
12413 /* PR9722: Check for Thumb2 availability before
12414 generating a thumb2 nop instruction. */
12415 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12417 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12418 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12421 inst
.instruction
= 0x46c0;
12426 constraint (inst
.operands
[0].present
,
12427 _("Thumb does not support NOP with hints"));
12428 inst
.instruction
= 0x46c0;
12435 if (unified_syntax
)
12437 bfd_boolean narrow
;
12439 if (THUMB_SETS_FLAGS (inst
.instruction
))
12440 narrow
= !in_it_block ();
12442 narrow
= in_it_block ();
12443 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12445 if (inst
.size_req
== 4)
12450 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12451 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12452 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12456 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12457 inst
.instruction
|= inst
.operands
[0].reg
;
12458 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12463 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12465 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12467 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12468 inst
.instruction
|= inst
.operands
[0].reg
;
12469 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12478 Rd
= inst
.operands
[0].reg
;
12479 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12481 reject_bad_reg (Rd
);
12482 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12483 reject_bad_reg (Rn
);
12485 inst
.instruction
|= Rd
<< 8;
12486 inst
.instruction
|= Rn
<< 16;
12488 if (!inst
.operands
[2].isreg
)
12490 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12491 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12497 Rm
= inst
.operands
[2].reg
;
12498 reject_bad_reg (Rm
);
12500 constraint (inst
.operands
[2].shifted
12501 && inst
.operands
[2].immisreg
,
12502 _("shift must be constant"));
12503 encode_thumb32_shifted_operand (2);
12510 unsigned Rd
, Rn
, Rm
;
12512 Rd
= inst
.operands
[0].reg
;
12513 Rn
= inst
.operands
[1].reg
;
12514 Rm
= inst
.operands
[2].reg
;
12516 reject_bad_reg (Rd
);
12517 reject_bad_reg (Rn
);
12518 reject_bad_reg (Rm
);
12520 inst
.instruction
|= Rd
<< 8;
12521 inst
.instruction
|= Rn
<< 16;
12522 inst
.instruction
|= Rm
;
12523 if (inst
.operands
[3].present
)
12525 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
12526 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12527 _("expression too complex"));
12528 inst
.instruction
|= (val
& 0x1c) << 10;
12529 inst
.instruction
|= (val
& 0x03) << 6;
12536 if (!inst
.operands
[3].present
)
12540 inst
.instruction
&= ~0x00000020;
12542 /* PR 10168. Swap the Rm and Rn registers. */
12543 Rtmp
= inst
.operands
[1].reg
;
12544 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12545 inst
.operands
[2].reg
= Rtmp
;
12553 if (inst
.operands
[0].immisreg
)
12554 reject_bad_reg (inst
.operands
[0].imm
);
12556 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12560 do_t_push_pop (void)
12564 constraint (inst
.operands
[0].writeback
,
12565 _("push/pop do not support {reglist}^"));
12566 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
12567 _("expression too complex"));
12569 mask
= inst
.operands
[0].imm
;
12570 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12571 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12572 else if (inst
.size_req
!= 4
12573 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
12574 ? REG_LR
: REG_PC
)))
12576 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12577 inst
.instruction
|= THUMB_PP_PC_LR
;
12578 inst
.instruction
|= mask
& 0xff;
12580 else if (unified_syntax
)
12582 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12583 encode_thumb2_ldmstm (13, mask
, TRUE
);
12587 inst
.error
= _("invalid register list to push/pop instruction");
12597 Rd
= inst
.operands
[0].reg
;
12598 Rm
= inst
.operands
[1].reg
;
12600 reject_bad_reg (Rd
);
12601 reject_bad_reg (Rm
);
12603 inst
.instruction
|= Rd
<< 8;
12604 inst
.instruction
|= Rm
<< 16;
12605 inst
.instruction
|= Rm
;
12613 Rd
= inst
.operands
[0].reg
;
12614 Rm
= inst
.operands
[1].reg
;
12616 reject_bad_reg (Rd
);
12617 reject_bad_reg (Rm
);
12619 if (Rd
<= 7 && Rm
<= 7
12620 && inst
.size_req
!= 4)
12622 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12623 inst
.instruction
|= Rd
;
12624 inst
.instruction
|= Rm
<< 3;
12626 else if (unified_syntax
)
12628 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12629 inst
.instruction
|= Rd
<< 8;
12630 inst
.instruction
|= Rm
<< 16;
12631 inst
.instruction
|= Rm
;
12634 inst
.error
= BAD_HIREG
;
12642 Rd
= inst
.operands
[0].reg
;
12643 Rm
= inst
.operands
[1].reg
;
12645 reject_bad_reg (Rd
);
12646 reject_bad_reg (Rm
);
12648 inst
.instruction
|= Rd
<< 8;
12649 inst
.instruction
|= Rm
;
12657 Rd
= inst
.operands
[0].reg
;
12658 Rs
= (inst
.operands
[1].present
12659 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12660 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12662 reject_bad_reg (Rd
);
12663 reject_bad_reg (Rs
);
12664 if (inst
.operands
[2].isreg
)
12665 reject_bad_reg (inst
.operands
[2].reg
);
12667 inst
.instruction
|= Rd
<< 8;
12668 inst
.instruction
|= Rs
<< 16;
12669 if (!inst
.operands
[2].isreg
)
12671 bfd_boolean narrow
;
12673 if ((inst
.instruction
& 0x00100000) != 0)
12674 narrow
= !in_it_block ();
12676 narrow
= in_it_block ();
12678 if (Rd
> 7 || Rs
> 7)
12681 if (inst
.size_req
== 4 || !unified_syntax
)
12684 if (inst
.reloc
.exp
.X_op
!= O_constant
12685 || inst
.reloc
.exp
.X_add_number
!= 0)
12688 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12689 relaxation, but it doesn't seem worth the hassle. */
12692 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12693 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12694 inst
.instruction
|= Rs
<< 3;
12695 inst
.instruction
|= Rd
;
12699 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12700 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12704 encode_thumb32_shifted_operand (2);
12710 if (warn_on_deprecated
12711 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12712 as_tsktsk (_("setend use is deprecated for ARMv8"));
12714 set_it_insn_type (OUTSIDE_IT_INSN
);
12715 if (inst
.operands
[0].imm
)
12716 inst
.instruction
|= 0x8;
12722 if (!inst
.operands
[1].present
)
12723 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12725 if (unified_syntax
)
12727 bfd_boolean narrow
;
12730 switch (inst
.instruction
)
12733 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12735 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12737 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12739 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12743 if (THUMB_SETS_FLAGS (inst
.instruction
))
12744 narrow
= !in_it_block ();
12746 narrow
= in_it_block ();
12747 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12749 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12751 if (inst
.operands
[2].isreg
12752 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12753 || inst
.operands
[2].reg
> 7))
12755 if (inst
.size_req
== 4)
12758 reject_bad_reg (inst
.operands
[0].reg
);
12759 reject_bad_reg (inst
.operands
[1].reg
);
12763 if (inst
.operands
[2].isreg
)
12765 reject_bad_reg (inst
.operands
[2].reg
);
12766 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12767 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12768 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12769 inst
.instruction
|= inst
.operands
[2].reg
;
12771 /* PR 12854: Error on extraneous shifts. */
12772 constraint (inst
.operands
[2].shifted
,
12773 _("extraneous shift as part of operand to shift insn"));
12777 inst
.operands
[1].shifted
= 1;
12778 inst
.operands
[1].shift_kind
= shift_kind
;
12779 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
12780 ? T_MNEM_movs
: T_MNEM_mov
);
12781 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12782 encode_thumb32_shifted_operand (1);
12783 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12784 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12789 if (inst
.operands
[2].isreg
)
12791 switch (shift_kind
)
12793 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12794 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12795 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12796 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12800 inst
.instruction
|= inst
.operands
[0].reg
;
12801 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12803 /* PR 12854: Error on extraneous shifts. */
12804 constraint (inst
.operands
[2].shifted
,
12805 _("extraneous shift as part of operand to shift insn"));
12809 switch (shift_kind
)
12811 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12812 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12813 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12816 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12817 inst
.instruction
|= inst
.operands
[0].reg
;
12818 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12824 constraint (inst
.operands
[0].reg
> 7
12825 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
12826 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12828 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
12830 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
12831 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12832 _("source1 and dest must be same register"));
12834 switch (inst
.instruction
)
12836 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12837 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12838 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12839 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12843 inst
.instruction
|= inst
.operands
[0].reg
;
12844 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12846 /* PR 12854: Error on extraneous shifts. */
12847 constraint (inst
.operands
[2].shifted
,
12848 _("extraneous shift as part of operand to shift insn"));
12852 switch (inst
.instruction
)
12854 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12855 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12856 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12857 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
12860 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12861 inst
.instruction
|= inst
.operands
[0].reg
;
12862 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12870 unsigned Rd
, Rn
, Rm
;
12872 Rd
= inst
.operands
[0].reg
;
12873 Rn
= inst
.operands
[1].reg
;
12874 Rm
= inst
.operands
[2].reg
;
12876 reject_bad_reg (Rd
);
12877 reject_bad_reg (Rn
);
12878 reject_bad_reg (Rm
);
12880 inst
.instruction
|= Rd
<< 8;
12881 inst
.instruction
|= Rn
<< 16;
12882 inst
.instruction
|= Rm
;
12888 unsigned Rd
, Rn
, Rm
;
12890 Rd
= inst
.operands
[0].reg
;
12891 Rm
= inst
.operands
[1].reg
;
12892 Rn
= inst
.operands
[2].reg
;
12894 reject_bad_reg (Rd
);
12895 reject_bad_reg (Rn
);
12896 reject_bad_reg (Rm
);
12898 inst
.instruction
|= Rd
<< 8;
12899 inst
.instruction
|= Rn
<< 16;
12900 inst
.instruction
|= Rm
;
12906 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12907 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
12908 _("SMC is not permitted on this architecture"));
12909 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12910 _("expression too complex"));
12911 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12912 inst
.instruction
|= (value
& 0xf000) >> 12;
12913 inst
.instruction
|= (value
& 0x0ff0);
12914 inst
.instruction
|= (value
& 0x000f) << 16;
12915 /* PR gas/15623: SMC instructions must be last in an IT block. */
12916 set_it_insn_type_last ();
12922 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12924 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12925 inst
.instruction
|= (value
& 0x0fff);
12926 inst
.instruction
|= (value
& 0xf000) << 4;
12930 do_t_ssat_usat (int bias
)
12934 Rd
= inst
.operands
[0].reg
;
12935 Rn
= inst
.operands
[2].reg
;
12937 reject_bad_reg (Rd
);
12938 reject_bad_reg (Rn
);
12940 inst
.instruction
|= Rd
<< 8;
12941 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
12942 inst
.instruction
|= Rn
<< 16;
12944 if (inst
.operands
[3].present
)
12946 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
12948 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12950 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12951 _("expression too complex"));
12953 if (shift_amount
!= 0)
12955 constraint (shift_amount
> 31,
12956 _("shift expression is too large"));
12958 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
12959 inst
.instruction
|= 0x00200000; /* sh bit. */
12961 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
12962 inst
.instruction
|= (shift_amount
& 0x03) << 6;
12970 do_t_ssat_usat (1);
12978 Rd
= inst
.operands
[0].reg
;
12979 Rn
= inst
.operands
[2].reg
;
12981 reject_bad_reg (Rd
);
12982 reject_bad_reg (Rn
);
12984 inst
.instruction
|= Rd
<< 8;
12985 inst
.instruction
|= inst
.operands
[1].imm
- 1;
12986 inst
.instruction
|= Rn
<< 16;
12992 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
12993 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
12994 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
12995 || inst
.operands
[2].negative
,
12998 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13000 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13001 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13002 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13003 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13009 if (!inst
.operands
[2].present
)
13010 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13012 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13013 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13014 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13017 inst
.instruction
|= inst
.operands
[0].reg
;
13018 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13019 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13020 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13026 unsigned Rd
, Rn
, Rm
;
13028 Rd
= inst
.operands
[0].reg
;
13029 Rn
= inst
.operands
[1].reg
;
13030 Rm
= inst
.operands
[2].reg
;
13032 reject_bad_reg (Rd
);
13033 reject_bad_reg (Rn
);
13034 reject_bad_reg (Rm
);
13036 inst
.instruction
|= Rd
<< 8;
13037 inst
.instruction
|= Rn
<< 16;
13038 inst
.instruction
|= Rm
;
13039 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13047 Rd
= inst
.operands
[0].reg
;
13048 Rm
= inst
.operands
[1].reg
;
13050 reject_bad_reg (Rd
);
13051 reject_bad_reg (Rm
);
13053 if (inst
.instruction
<= 0xffff
13054 && inst
.size_req
!= 4
13055 && Rd
<= 7 && Rm
<= 7
13056 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13058 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13059 inst
.instruction
|= Rd
;
13060 inst
.instruction
|= Rm
<< 3;
13062 else if (unified_syntax
)
13064 if (inst
.instruction
<= 0xffff)
13065 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13066 inst
.instruction
|= Rd
<< 8;
13067 inst
.instruction
|= Rm
;
13068 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13072 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13073 _("Thumb encoding does not support rotation"));
13074 constraint (1, BAD_HIREG
);
13081 /* We have to do the following check manually as ARM_EXT_OS only applies
13083 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6m
))
13085 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_os
)
13086 /* This only applies to the v6m however, not later architectures. */
13087 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
))
13088 as_bad (_("SVC is not permitted on this architecture"));
13089 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, arm_ext_os
);
13092 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
13101 half
= (inst
.instruction
& 0x10) != 0;
13102 set_it_insn_type_last ();
13103 constraint (inst
.operands
[0].immisreg
,
13104 _("instruction requires register index"));
13106 Rn
= inst
.operands
[0].reg
;
13107 Rm
= inst
.operands
[0].imm
;
13109 constraint (Rn
== REG_SP
, BAD_SP
);
13110 reject_bad_reg (Rm
);
13112 constraint (!half
&& inst
.operands
[0].shifted
,
13113 _("instruction does not allow shifted index"));
13114 inst
.instruction
|= (Rn
<< 16) | Rm
;
13120 if (!inst
.operands
[0].present
)
13121 inst
.operands
[0].imm
= 0;
13123 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13125 constraint (inst
.size_req
== 2,
13126 _("immediate value out of range"));
13127 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13128 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13129 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13133 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13134 inst
.instruction
|= inst
.operands
[0].imm
;
13137 set_it_insn_type (NEUTRAL_IT_INSN
);
13144 do_t_ssat_usat (0);
13152 Rd
= inst
.operands
[0].reg
;
13153 Rn
= inst
.operands
[2].reg
;
13155 reject_bad_reg (Rd
);
13156 reject_bad_reg (Rn
);
13158 inst
.instruction
|= Rd
<< 8;
13159 inst
.instruction
|= inst
.operands
[1].imm
;
13160 inst
.instruction
|= Rn
<< 16;
13163 /* Neon instruction encoder helpers. */
13165 /* Encodings for the different types for various Neon opcodes. */
13167 /* An "invalid" code for the following tables. */
13170 struct neon_tab_entry
13173 unsigned float_or_poly
;
13174 unsigned scalar_or_imm
;
13177 /* Map overloaded Neon opcodes to their respective encodings. */
13178 #define NEON_ENC_TAB \
13179 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13180 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13181 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13182 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13183 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13184 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13185 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13186 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13187 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13188 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13189 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13190 /* Register variants of the following two instructions are encoded as
13191 vcge / vcgt with the operands reversed. */ \
13192 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13193 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13194 X(vfma, N_INV, 0x0000c10, N_INV), \
13195 X(vfms, N_INV, 0x0200c10, N_INV), \
13196 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13197 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13198 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13199 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13200 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13201 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13202 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13203 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13204 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13205 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13206 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13207 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13208 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13209 X(vshl, 0x0000400, N_INV, 0x0800510), \
13210 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13211 X(vand, 0x0000110, N_INV, 0x0800030), \
13212 X(vbic, 0x0100110, N_INV, 0x0800030), \
13213 X(veor, 0x1000110, N_INV, N_INV), \
13214 X(vorn, 0x0300110, N_INV, 0x0800010), \
13215 X(vorr, 0x0200110, N_INV, 0x0800010), \
13216 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13217 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13218 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13219 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13220 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13221 X(vst1, 0x0000000, 0x0800000, N_INV), \
13222 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13223 X(vst2, 0x0000100, 0x0800100, N_INV), \
13224 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13225 X(vst3, 0x0000200, 0x0800200, N_INV), \
13226 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13227 X(vst4, 0x0000300, 0x0800300, N_INV), \
13228 X(vmovn, 0x1b20200, N_INV, N_INV), \
13229 X(vtrn, 0x1b20080, N_INV, N_INV), \
13230 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13231 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13232 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13233 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13234 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13235 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13236 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13237 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13238 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13239 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13240 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13241 X(vseleq, 0xe000a00, N_INV, N_INV), \
13242 X(vselvs, 0xe100a00, N_INV, N_INV), \
13243 X(vselge, 0xe200a00, N_INV, N_INV), \
13244 X(vselgt, 0xe300a00, N_INV, N_INV), \
13245 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13246 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13247 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13248 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13249 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13250 X(aes, 0x3b00300, N_INV, N_INV), \
13251 X(sha3op, 0x2000c00, N_INV, N_INV), \
13252 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13253 X(sha2op, 0x3ba0380, N_INV, N_INV)
13257 #define X(OPC,I,F,S) N_MNEM_##OPC
13262 static const struct neon_tab_entry neon_enc_tab
[] =
13264 #define X(OPC,I,F,S) { (I), (F), (S) }
13269 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13270 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13271 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13272 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13273 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13274 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13275 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13276 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13277 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13278 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13279 #define NEON_ENC_SINGLE_(X) \
13280 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13281 #define NEON_ENC_DOUBLE_(X) \
13282 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13283 #define NEON_ENC_FPV8_(X) \
13284 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13286 #define NEON_ENCODE(type, inst) \
13289 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13290 inst.is_neon = 1; \
13294 #define check_neon_suffixes \
13297 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13299 as_bad (_("invalid neon suffix for non neon instruction")); \
13305 /* Define shapes for instruction operands. The following mnemonic characters
13306 are used in this table:
13308 F - VFP S<n> register
13309 D - Neon D<n> register
13310 Q - Neon Q<n> register
13314 L - D<n> register list
13316 This table is used to generate various data:
13317 - enumerations of the form NS_DDR to be used as arguments to
13319 - a table classifying shapes into single, double, quad, mixed.
13320 - a table used to drive neon_select_shape. */
13322 #define NEON_SHAPE_DEF \
13323 X(3, (D, D, D), DOUBLE), \
13324 X(3, (Q, Q, Q), QUAD), \
13325 X(3, (D, D, I), DOUBLE), \
13326 X(3, (Q, Q, I), QUAD), \
13327 X(3, (D, D, S), DOUBLE), \
13328 X(3, (Q, Q, S), QUAD), \
13329 X(2, (D, D), DOUBLE), \
13330 X(2, (Q, Q), QUAD), \
13331 X(2, (D, S), DOUBLE), \
13332 X(2, (Q, S), QUAD), \
13333 X(2, (D, R), DOUBLE), \
13334 X(2, (Q, R), QUAD), \
13335 X(2, (D, I), DOUBLE), \
13336 X(2, (Q, I), QUAD), \
13337 X(3, (D, L, D), DOUBLE), \
13338 X(2, (D, Q), MIXED), \
13339 X(2, (Q, D), MIXED), \
13340 X(3, (D, Q, I), MIXED), \
13341 X(3, (Q, D, I), MIXED), \
13342 X(3, (Q, D, D), MIXED), \
13343 X(3, (D, Q, Q), MIXED), \
13344 X(3, (Q, Q, D), MIXED), \
13345 X(3, (Q, D, S), MIXED), \
13346 X(3, (D, Q, S), MIXED), \
13347 X(4, (D, D, D, I), DOUBLE), \
13348 X(4, (Q, Q, Q, I), QUAD), \
13349 X(2, (F, F), SINGLE), \
13350 X(3, (F, F, F), SINGLE), \
13351 X(2, (F, I), SINGLE), \
13352 X(2, (F, D), MIXED), \
13353 X(2, (D, F), MIXED), \
13354 X(3, (F, F, I), MIXED), \
13355 X(4, (R, R, F, F), SINGLE), \
13356 X(4, (F, F, R, R), SINGLE), \
13357 X(3, (D, R, R), DOUBLE), \
13358 X(3, (R, R, D), DOUBLE), \
13359 X(2, (S, R), SINGLE), \
13360 X(2, (R, S), SINGLE), \
13361 X(2, (F, R), SINGLE), \
13362 X(2, (R, F), SINGLE), \
13363 /* Half float shape supported so far. */\
13364 X (2, (H, D), MIXED), \
13365 X (2, (D, H), MIXED), \
13366 X (2, (H, F), MIXED), \
13367 X (2, (F, H), MIXED), \
13368 X (2, (H, H), HALF), \
13369 X (2, (H, R), HALF), \
13370 X (2, (R, H), HALF), \
13371 X (2, (H, I), HALF), \
13372 X (3, (H, H, H), HALF), \
13373 X (3, (H, F, I), MIXED), \
13374 X (3, (F, H, I), MIXED)
13376 #define S2(A,B) NS_##A##B
13377 #define S3(A,B,C) NS_##A##B##C
13378 #define S4(A,B,C,D) NS_##A##B##C##D
13380 #define X(N, L, C) S##N L
13393 enum neon_shape_class
13402 #define X(N, L, C) SC_##C
13404 static enum neon_shape_class neon_shape_class
[] =
13423 /* Register widths of above. */
13424 static unsigned neon_shape_el_size
[] =
13436 struct neon_shape_info
13439 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13442 #define S2(A,B) { SE_##A, SE_##B }
13443 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13444 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13446 #define X(N, L, C) { N, S##N L }
13448 static struct neon_shape_info neon_shape_tab
[] =
13458 /* Bit masks used in type checking given instructions.
13459 'N_EQK' means the type must be the same as (or based on in some way) the key
13460 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13461 set, various other bits can be set as well in order to modify the meaning of
13462 the type constraint. */
13464 enum neon_type_mask
13488 N_KEY
= 0x1000000, /* Key element (main type specifier). */
13489 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
13490 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
13491 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
13492 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
13493 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
13494 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13495 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13496 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13497 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
13498 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13500 N_MAX_NONSPECIAL
= N_P64
13503 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13505 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13506 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13507 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13508 #define N_S_32 (N_S8 | N_S16 | N_S32)
13509 #define N_F_16_32 (N_F16 | N_F32)
13510 #define N_SUF_32 (N_SU_32 | N_F_16_32)
13511 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13512 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
13513 #define N_F_ALL (N_F16 | N_F32 | N_F64)
13515 /* Pass this as the first type argument to neon_check_type to ignore types
13517 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13519 /* Select a "shape" for the current instruction (describing register types or
13520 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13521 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13522 function of operand parsing, so this function doesn't need to be called.
13523 Shapes should be listed in order of decreasing length. */
13525 static enum neon_shape
13526 neon_select_shape (enum neon_shape shape
, ...)
13529 enum neon_shape first_shape
= shape
;
13531 /* Fix missing optional operands. FIXME: we don't know at this point how
13532 many arguments we should have, so this makes the assumption that we have
13533 > 1. This is true of all current Neon opcodes, I think, but may not be
13534 true in the future. */
13535 if (!inst
.operands
[1].present
)
13536 inst
.operands
[1] = inst
.operands
[0];
13538 va_start (ap
, shape
);
13540 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13545 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13547 if (!inst
.operands
[j
].present
)
13553 switch (neon_shape_tab
[shape
].el
[j
])
13555 /* If a .f16, .16, .u16, .s16 type specifier is given over
13556 a VFP single precision register operand, it's essentially
13557 means only half of the register is used.
13559 If the type specifier is given after the mnemonics, the
13560 information is stored in inst.vectype. If the type specifier
13561 is given after register operand, the information is stored
13562 in inst.operands[].vectype.
13564 When there is only one type specifier, and all the register
13565 operands are the same type of hardware register, the type
13566 specifier applies to all register operands.
13568 If no type specifier is given, the shape is inferred from
13569 operand information.
13572 vadd.f16 s0, s1, s2: NS_HHH
13573 vabs.f16 s0, s1: NS_HH
13574 vmov.f16 s0, r1: NS_HR
13575 vmov.f16 r0, s1: NS_RH
13576 vcvt.f16 r0, s1: NS_RH
13577 vcvt.f16.s32 s2, s2, #29: NS_HFI
13578 vcvt.f16.s32 s2, s2: NS_HF
13581 if (!(inst
.operands
[j
].isreg
13582 && inst
.operands
[j
].isvec
13583 && inst
.operands
[j
].issingle
13584 && !inst
.operands
[j
].isquad
13585 && ((inst
.vectype
.elems
== 1
13586 && inst
.vectype
.el
[0].size
== 16)
13587 || (inst
.vectype
.elems
> 1
13588 && inst
.vectype
.el
[j
].size
== 16)
13589 || (inst
.vectype
.elems
== 0
13590 && inst
.operands
[j
].vectype
.type
!= NT_invtype
13591 && inst
.operands
[j
].vectype
.size
== 16))))
13596 if (!(inst
.operands
[j
].isreg
13597 && inst
.operands
[j
].isvec
13598 && inst
.operands
[j
].issingle
13599 && !inst
.operands
[j
].isquad
13600 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
13601 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
13602 || (inst
.vectype
.elems
== 0
13603 && (inst
.operands
[j
].vectype
.size
== 32
13604 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
13609 if (!(inst
.operands
[j
].isreg
13610 && inst
.operands
[j
].isvec
13611 && !inst
.operands
[j
].isquad
13612 && !inst
.operands
[j
].issingle
))
13617 if (!(inst
.operands
[j
].isreg
13618 && !inst
.operands
[j
].isvec
))
13623 if (!(inst
.operands
[j
].isreg
13624 && inst
.operands
[j
].isvec
13625 && inst
.operands
[j
].isquad
13626 && !inst
.operands
[j
].issingle
))
13631 if (!(!inst
.operands
[j
].isreg
13632 && !inst
.operands
[j
].isscalar
))
13637 if (!(!inst
.operands
[j
].isreg
13638 && inst
.operands
[j
].isscalar
))
13648 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
13649 /* We've matched all the entries in the shape table, and we don't
13650 have any left over operands which have not been matched. */
13656 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
13657 first_error (_("invalid instruction shape"));
13662 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13663 means the Q bit should be set). */
13666 neon_quad (enum neon_shape shape
)
13668 return neon_shape_class
[shape
] == SC_QUAD
;
13672 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
13675 /* Allow modification to be made to types which are constrained to be
13676 based on the key element, based on bits set alongside N_EQK. */
13677 if ((typebits
& N_EQK
) != 0)
13679 if ((typebits
& N_HLF
) != 0)
13681 else if ((typebits
& N_DBL
) != 0)
13683 if ((typebits
& N_SGN
) != 0)
13684 *g_type
= NT_signed
;
13685 else if ((typebits
& N_UNS
) != 0)
13686 *g_type
= NT_unsigned
;
13687 else if ((typebits
& N_INT
) != 0)
13688 *g_type
= NT_integer
;
13689 else if ((typebits
& N_FLT
) != 0)
13690 *g_type
= NT_float
;
13691 else if ((typebits
& N_SIZ
) != 0)
13692 *g_type
= NT_untyped
;
13696 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13697 operand type, i.e. the single type specified in a Neon instruction when it
13698 is the only one given. */
13700 static struct neon_type_el
13701 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
13703 struct neon_type_el dest
= *key
;
13705 gas_assert ((thisarg
& N_EQK
) != 0);
13707 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
13712 /* Convert Neon type and size into compact bitmask representation. */
13714 static enum neon_type_mask
13715 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
13722 case 8: return N_8
;
13723 case 16: return N_16
;
13724 case 32: return N_32
;
13725 case 64: return N_64
;
13733 case 8: return N_I8
;
13734 case 16: return N_I16
;
13735 case 32: return N_I32
;
13736 case 64: return N_I64
;
13744 case 16: return N_F16
;
13745 case 32: return N_F32
;
13746 case 64: return N_F64
;
13754 case 8: return N_P8
;
13755 case 16: return N_P16
;
13756 case 64: return N_P64
;
13764 case 8: return N_S8
;
13765 case 16: return N_S16
;
13766 case 32: return N_S32
;
13767 case 64: return N_S64
;
13775 case 8: return N_U8
;
13776 case 16: return N_U16
;
13777 case 32: return N_U32
;
13778 case 64: return N_U64
;
13789 /* Convert compact Neon bitmask type representation to a type and size. Only
13790 handles the case where a single bit is set in the mask. */
13793 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
13794 enum neon_type_mask mask
)
13796 if ((mask
& N_EQK
) != 0)
13799 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
13801 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
13803 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
13805 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
13810 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
13812 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
13813 *type
= NT_unsigned
;
13814 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
13815 *type
= NT_integer
;
13816 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
13817 *type
= NT_untyped
;
13818 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
13820 else if ((mask
& (N_F_ALL
)) != 0)
13828 /* Modify a bitmask of allowed types. This is only needed for type
13832 modify_types_allowed (unsigned allowed
, unsigned mods
)
13835 enum neon_el_type type
;
13841 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
13843 if (el_type_of_type_chk (&type
, &size
,
13844 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
13846 neon_modify_type_size (mods
, &type
, &size
);
13847 destmask
|= type_chk_of_el_type (type
, size
);
13854 /* Check type and return type classification.
13855 The manual states (paraphrase): If one datatype is given, it indicates the
13857 - the second operand, if there is one
13858 - the operand, if there is no second operand
13859 - the result, if there are no operands.
13860 This isn't quite good enough though, so we use a concept of a "key" datatype
13861 which is set on a per-instruction basis, which is the one which matters when
13862 only one data type is written.
13863 Note: this function has side-effects (e.g. filling in missing operands). All
13864 Neon instructions should call it before performing bit encoding. */
13866 static struct neon_type_el
13867 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
13870 unsigned i
, pass
, key_el
= 0;
13871 unsigned types
[NEON_MAX_TYPE_ELS
];
13872 enum neon_el_type k_type
= NT_invtype
;
13873 unsigned k_size
= -1u;
13874 struct neon_type_el badtype
= {NT_invtype
, -1};
13875 unsigned key_allowed
= 0;
13877 /* Optional registers in Neon instructions are always (not) in operand 1.
13878 Fill in the missing operand here, if it was omitted. */
13879 if (els
> 1 && !inst
.operands
[1].present
)
13880 inst
.operands
[1] = inst
.operands
[0];
13882 /* Suck up all the varargs. */
13884 for (i
= 0; i
< els
; i
++)
13886 unsigned thisarg
= va_arg (ap
, unsigned);
13887 if (thisarg
== N_IGNORE_TYPE
)
13892 types
[i
] = thisarg
;
13893 if ((thisarg
& N_KEY
) != 0)
13898 if (inst
.vectype
.elems
> 0)
13899 for (i
= 0; i
< els
; i
++)
13900 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
13902 first_error (_("types specified in both the mnemonic and operands"));
13906 /* Duplicate inst.vectype elements here as necessary.
13907 FIXME: No idea if this is exactly the same as the ARM assembler,
13908 particularly when an insn takes one register and one non-register
13910 if (inst
.vectype
.elems
== 1 && els
> 1)
13913 inst
.vectype
.elems
= els
;
13914 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
13915 for (j
= 0; j
< els
; j
++)
13917 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13920 else if (inst
.vectype
.elems
== 0 && els
> 0)
13923 /* No types were given after the mnemonic, so look for types specified
13924 after each operand. We allow some flexibility here; as long as the
13925 "key" operand has a type, we can infer the others. */
13926 for (j
= 0; j
< els
; j
++)
13927 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
13928 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
13930 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
13932 for (j
= 0; j
< els
; j
++)
13933 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
13934 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13939 first_error (_("operand types can't be inferred"));
13943 else if (inst
.vectype
.elems
!= els
)
13945 first_error (_("type specifier has the wrong number of parts"));
13949 for (pass
= 0; pass
< 2; pass
++)
13951 for (i
= 0; i
< els
; i
++)
13953 unsigned thisarg
= types
[i
];
13954 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
13955 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
13956 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
13957 unsigned g_size
= inst
.vectype
.el
[i
].size
;
13959 /* Decay more-specific signed & unsigned types to sign-insensitive
13960 integer types if sign-specific variants are unavailable. */
13961 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
13962 && (types_allowed
& N_SU_ALL
) == 0)
13963 g_type
= NT_integer
;
13965 /* If only untyped args are allowed, decay any more specific types to
13966 them. Some instructions only care about signs for some element
13967 sizes, so handle that properly. */
13968 if (((types_allowed
& N_UNT
) == 0)
13969 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
13970 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
13971 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
13972 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
13973 g_type
= NT_untyped
;
13977 if ((thisarg
& N_KEY
) != 0)
13981 key_allowed
= thisarg
& ~N_KEY
;
13983 /* Check architecture constraint on FP16 extension. */
13985 && k_type
== NT_float
13986 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
13988 inst
.error
= _(BAD_FP16
);
13995 if ((thisarg
& N_VFP
) != 0)
13997 enum neon_shape_el regshape
;
13998 unsigned regwidth
, match
;
14000 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14003 first_error (_("invalid instruction shape"));
14006 regshape
= neon_shape_tab
[ns
].el
[i
];
14007 regwidth
= neon_shape_el_size
[regshape
];
14009 /* In VFP mode, operands must match register widths. If we
14010 have a key operand, use its width, else use the width of
14011 the current operand. */
14017 /* FP16 will use a single precision register. */
14018 if (regwidth
== 32 && match
== 16)
14020 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14024 inst
.error
= _(BAD_FP16
);
14029 if (regwidth
!= match
)
14031 first_error (_("operand size must match register width"));
14036 if ((thisarg
& N_EQK
) == 0)
14038 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14040 if ((given_type
& types_allowed
) == 0)
14042 first_error (_("bad type in Neon instruction"));
14048 enum neon_el_type mod_k_type
= k_type
;
14049 unsigned mod_k_size
= k_size
;
14050 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14051 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14053 first_error (_("inconsistent types in Neon instruction"));
14061 return inst
.vectype
.el
[key_el
];
14064 /* Neon-style VFP instruction forwarding. */
14066 /* Thumb VFP instructions have 0xE in the condition field. */
14069 do_vfp_cond_or_thumb (void)
14074 inst
.instruction
|= 0xe0000000;
14076 inst
.instruction
|= inst
.cond
<< 28;
14079 /* Look up and encode a simple mnemonic, for use as a helper function for the
14080 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14081 etc. It is assumed that operand parsing has already been done, and that the
14082 operands are in the form expected by the given opcode (this isn't necessarily
14083 the same as the form in which they were parsed, hence some massaging must
14084 take place before this function is called).
14085 Checks current arch version against that in the looked-up opcode. */
14088 do_vfp_nsyn_opcode (const char *opname
)
14090 const struct asm_opcode
*opcode
;
14092 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14097 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14098 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14105 inst
.instruction
= opcode
->tvalue
;
14106 opcode
->tencode ();
14110 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14111 opcode
->aencode ();
14116 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14118 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14120 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14123 do_vfp_nsyn_opcode ("fadds");
14125 do_vfp_nsyn_opcode ("fsubs");
14127 /* ARMv8.2 fp16 instruction. */
14129 do_scalar_fp16_v82_encode ();
14134 do_vfp_nsyn_opcode ("faddd");
14136 do_vfp_nsyn_opcode ("fsubd");
14140 /* Check operand types to see if this is a VFP instruction, and if so call
14144 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14146 enum neon_shape rs
;
14147 struct neon_type_el et
;
14152 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14153 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14157 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14158 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14159 N_F_ALL
| N_KEY
| N_VFP
);
14166 if (et
.type
!= NT_invtype
)
14177 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14179 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14181 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14184 do_vfp_nsyn_opcode ("fmacs");
14186 do_vfp_nsyn_opcode ("fnmacs");
14188 /* ARMv8.2 fp16 instruction. */
14190 do_scalar_fp16_v82_encode ();
14195 do_vfp_nsyn_opcode ("fmacd");
14197 do_vfp_nsyn_opcode ("fnmacd");
14202 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14204 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14206 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14209 do_vfp_nsyn_opcode ("ffmas");
14211 do_vfp_nsyn_opcode ("ffnmas");
14213 /* ARMv8.2 fp16 instruction. */
14215 do_scalar_fp16_v82_encode ();
14220 do_vfp_nsyn_opcode ("ffmad");
14222 do_vfp_nsyn_opcode ("ffnmad");
14227 do_vfp_nsyn_mul (enum neon_shape rs
)
14229 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14231 do_vfp_nsyn_opcode ("fmuls");
14233 /* ARMv8.2 fp16 instruction. */
14235 do_scalar_fp16_v82_encode ();
14238 do_vfp_nsyn_opcode ("fmuld");
14242 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14244 int is_neg
= (inst
.instruction
& 0x80) != 0;
14245 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14247 if (rs
== NS_FF
|| rs
== NS_HH
)
14250 do_vfp_nsyn_opcode ("fnegs");
14252 do_vfp_nsyn_opcode ("fabss");
14254 /* ARMv8.2 fp16 instruction. */
14256 do_scalar_fp16_v82_encode ();
14261 do_vfp_nsyn_opcode ("fnegd");
14263 do_vfp_nsyn_opcode ("fabsd");
14267 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14268 insns belong to Neon, and are handled elsewhere. */
14271 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14273 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14277 do_vfp_nsyn_opcode ("fldmdbs");
14279 do_vfp_nsyn_opcode ("fldmias");
14284 do_vfp_nsyn_opcode ("fstmdbs");
14286 do_vfp_nsyn_opcode ("fstmias");
14291 do_vfp_nsyn_sqrt (void)
14293 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14294 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14296 if (rs
== NS_FF
|| rs
== NS_HH
)
14298 do_vfp_nsyn_opcode ("fsqrts");
14300 /* ARMv8.2 fp16 instruction. */
14302 do_scalar_fp16_v82_encode ();
14305 do_vfp_nsyn_opcode ("fsqrtd");
14309 do_vfp_nsyn_div (void)
14311 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14312 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14313 N_F_ALL
| N_KEY
| N_VFP
);
14315 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14317 do_vfp_nsyn_opcode ("fdivs");
14319 /* ARMv8.2 fp16 instruction. */
14321 do_scalar_fp16_v82_encode ();
14324 do_vfp_nsyn_opcode ("fdivd");
14328 do_vfp_nsyn_nmul (void)
14330 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14331 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14332 N_F_ALL
| N_KEY
| N_VFP
);
14334 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14336 NEON_ENCODE (SINGLE
, inst
);
14337 do_vfp_sp_dyadic ();
14339 /* ARMv8.2 fp16 instruction. */
14341 do_scalar_fp16_v82_encode ();
14345 NEON_ENCODE (DOUBLE
, inst
);
14346 do_vfp_dp_rd_rn_rm ();
14348 do_vfp_cond_or_thumb ();
14353 do_vfp_nsyn_cmp (void)
14355 enum neon_shape rs
;
14356 if (inst
.operands
[1].isreg
)
14358 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14359 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14361 if (rs
== NS_FF
|| rs
== NS_HH
)
14363 NEON_ENCODE (SINGLE
, inst
);
14364 do_vfp_sp_monadic ();
14368 NEON_ENCODE (DOUBLE
, inst
);
14369 do_vfp_dp_rd_rm ();
14374 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
14375 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
14377 switch (inst
.instruction
& 0x0fffffff)
14380 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14383 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14389 if (rs
== NS_FI
|| rs
== NS_HI
)
14391 NEON_ENCODE (SINGLE
, inst
);
14392 do_vfp_sp_compare_z ();
14396 NEON_ENCODE (DOUBLE
, inst
);
14400 do_vfp_cond_or_thumb ();
14402 /* ARMv8.2 fp16 instruction. */
14403 if (rs
== NS_HI
|| rs
== NS_HH
)
14404 do_scalar_fp16_v82_encode ();
14408 nsyn_insert_sp (void)
14410 inst
.operands
[1] = inst
.operands
[0];
14411 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14412 inst
.operands
[0].reg
= REG_SP
;
14413 inst
.operands
[0].isreg
= 1;
14414 inst
.operands
[0].writeback
= 1;
14415 inst
.operands
[0].present
= 1;
14419 do_vfp_nsyn_push (void)
14423 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14424 _("register list must contain at least 1 and at most 16 "
14427 if (inst
.operands
[1].issingle
)
14428 do_vfp_nsyn_opcode ("fstmdbs");
14430 do_vfp_nsyn_opcode ("fstmdbd");
14434 do_vfp_nsyn_pop (void)
14438 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14439 _("register list must contain at least 1 and at most 16 "
14442 if (inst
.operands
[1].issingle
)
14443 do_vfp_nsyn_opcode ("fldmias");
14445 do_vfp_nsyn_opcode ("fldmiad");
14448 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14449 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14452 neon_dp_fixup (struct arm_it
* insn
)
14454 unsigned int i
= insn
->instruction
;
14459 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14470 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
/* Split a Neon register number into its low 4 bits and its high (D/N/M) bit,
   as required by the split register fields in the encodings.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14485 /* Encode insns with bit pattern:
14487 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14488 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14490 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14491 different meaning for some instruction. */
14494 neon_three_same (int isquad
, int ubit
, int size
)
14496 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14497 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14498 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14499 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14500 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14501 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14502 inst
.instruction
|= (isquad
!= 0) << 6;
14503 inst
.instruction
|= (ubit
!= 0) << 24;
14505 inst
.instruction
|= neon_logbits (size
) << 20;
14507 neon_dp_fixup (&inst
);
14510 /* Encode instructions of the form:
14512 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14513 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14515 Don't write size if SIZE == -1. */
14518 neon_two_same (int qbit
, int ubit
, int size
)
14520 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14521 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14522 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14523 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14524 inst
.instruction
|= (qbit
!= 0) << 6;
14525 inst
.instruction
|= (ubit
!= 0) << 24;
14528 inst
.instruction
|= neon_logbits (size
) << 18;
14530 neon_dp_fixup (&inst
);
14533 /* Neon instruction encoders, in approximate order of appearance. */
14536 do_neon_dyadic_i_su (void)
14538 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14539 struct neon_type_el et
= neon_check_type (3, rs
,
14540 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14541 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14545 do_neon_dyadic_i64_su (void)
14547 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14548 struct neon_type_el et
= neon_check_type (3, rs
,
14549 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14550 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14554 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14557 unsigned size
= et
.size
>> 3;
14558 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14559 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14560 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14561 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14562 inst
.instruction
|= (isquad
!= 0) << 6;
14563 inst
.instruction
|= immbits
<< 16;
14564 inst
.instruction
|= (size
>> 3) << 7;
14565 inst
.instruction
|= (size
& 0x7) << 19;
14567 inst
.instruction
|= (uval
!= 0) << 24;
14569 neon_dp_fixup (&inst
);
14573 do_neon_shl_imm (void)
14575 if (!inst
.operands
[2].isreg
)
14577 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14578 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14579 int imm
= inst
.operands
[2].imm
;
14581 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14582 _("immediate out of range for shift"));
14583 NEON_ENCODE (IMMED
, inst
);
14584 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14588 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14589 struct neon_type_el et
= neon_check_type (3, rs
,
14590 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14593 /* VSHL/VQSHL 3-register variants have syntax such as:
14595 whereas other 3-register operations encoded by neon_three_same have
14598 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14600 tmp
= inst
.operands
[2].reg
;
14601 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14602 inst
.operands
[1].reg
= tmp
;
14603 NEON_ENCODE (INTEGER
, inst
);
14604 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14609 do_neon_qshl_imm (void)
14611 if (!inst
.operands
[2].isreg
)
14613 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14614 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14615 int imm
= inst
.operands
[2].imm
;
14617 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14618 _("immediate out of range for shift"));
14619 NEON_ENCODE (IMMED
, inst
);
14620 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
14624 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14625 struct neon_type_el et
= neon_check_type (3, rs
,
14626 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14629 /* See note in do_neon_shl_imm. */
14630 tmp
= inst
.operands
[2].reg
;
14631 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14632 inst
.operands
[1].reg
= tmp
;
14633 NEON_ENCODE (INTEGER
, inst
);
14634 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14639 do_neon_rshl (void)
14641 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14642 struct neon_type_el et
= neon_check_type (3, rs
,
14643 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14646 tmp
= inst
.operands
[2].reg
;
14647 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14648 inst
.operands
[1].reg
= tmp
;
14649 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14653 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
14655 /* Handle .I8 pseudo-instructions. */
14658 /* Unfortunately, this will make everything apart from zero out-of-range.
14659 FIXME is this the intended semantics? There doesn't seem much point in
14660 accepting .I8 if so. */
14661 immediate
|= immediate
<< 8;
14667 if (immediate
== (immediate
& 0x000000ff))
14669 *immbits
= immediate
;
14672 else if (immediate
== (immediate
& 0x0000ff00))
14674 *immbits
= immediate
>> 8;
14677 else if (immediate
== (immediate
& 0x00ff0000))
14679 *immbits
= immediate
>> 16;
14682 else if (immediate
== (immediate
& 0xff000000))
14684 *immbits
= immediate
>> 24;
14687 if ((immediate
& 0xffff) != (immediate
>> 16))
14688 goto bad_immediate
;
14689 immediate
&= 0xffff;
14692 if (immediate
== (immediate
& 0x000000ff))
14694 *immbits
= immediate
;
14697 else if (immediate
== (immediate
& 0x0000ff00))
14699 *immbits
= immediate
>> 8;
14704 first_error (_("immediate value out of range"));
14709 do_neon_logic (void)
14711 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
14713 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14714 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14715 /* U bit and size field were set as part of the bitmask. */
14716 NEON_ENCODE (INTEGER
, inst
);
14717 neon_three_same (neon_quad (rs
), 0, -1);
14721 const int three_ops_form
= (inst
.operands
[2].present
14722 && !inst
.operands
[2].isreg
);
14723 const int immoperand
= (three_ops_form
? 2 : 1);
14724 enum neon_shape rs
= (three_ops_form
14725 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
14726 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
14727 struct neon_type_el et
= neon_check_type (2, rs
,
14728 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14729 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
14733 if (et
.type
== NT_invtype
)
14736 if (three_ops_form
)
14737 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14738 _("first and second operands shall be the same register"));
14740 NEON_ENCODE (IMMED
, inst
);
14742 immbits
= inst
.operands
[immoperand
].imm
;
14745 /* .i64 is a pseudo-op, so the immediate must be a repeating
14747 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
14748 inst
.operands
[immoperand
].reg
: 0))
14750 /* Set immbits to an invalid constant. */
14751 immbits
= 0xdeadbeef;
14758 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14762 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14766 /* Pseudo-instruction for VBIC. */
14767 neon_invert_size (&immbits
, 0, et
.size
);
14768 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14772 /* Pseudo-instruction for VORR. */
14773 neon_invert_size (&immbits
, 0, et
.size
);
14774 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14784 inst
.instruction
|= neon_quad (rs
) << 6;
14785 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14786 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14787 inst
.instruction
|= cmode
<< 8;
14788 neon_write_immbits (immbits
);
14790 neon_dp_fixup (&inst
);
14795 do_neon_bitfield (void)
14797 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14798 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14799 neon_three_same (neon_quad (rs
), 0, -1);
14803 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
14806 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14807 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
14809 if (et
.type
== NT_float
)
14811 NEON_ENCODE (FLOAT
, inst
);
14812 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
14816 NEON_ENCODE (INTEGER
, inst
);
14817 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
14822 do_neon_dyadic_if_su (void)
14824 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14828 do_neon_dyadic_if_su_d (void)
14830 /* This version only allow D registers, but that constraint is enforced during
14831 operand parsing so we don't need to do anything extra here. */
14832 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14836 do_neon_dyadic_if_i_d (void)
14838 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14839 affected if we specify unsigned args. */
14840 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Flags controlling which checks vfp_or_neon_is_neon performs.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
14850 /* Call this function if an instruction which may have belonged to the VFP or
14851 Neon instruction sets, but turned out to be a Neon instruction (due to the
14852 operand types involved, etc.). We have to check and/or fix-up a couple of
14855 - Make sure the user hasn't attempted to make a Neon instruction
14857 - Alter the value in the condition code field if necessary.
14858 - Make sure that the arch supports Neon instructions.
14860 Which of these operations take place depends on bits from enum
14861 vfp_or_neon_is_neon_bits.
14863 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14864 current instruction's condition is COND_ALWAYS, the condition field is
14865 changed to inst.uncond_value. This is necessary because instructions shared
14866 between VFP and Neon may be conditional for the VFP variants only, and the
14867 unconditional Neon version must have, e.g., 0xF in the condition field. */
14870 vfp_or_neon_is_neon (unsigned check
)
14872 /* Conditions are always legal in Thumb mode (IT blocks). */
14873 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
14875 if (inst
.cond
!= COND_ALWAYS
)
14877 first_error (_(BAD_COND
));
14880 if (inst
.uncond_value
!= -1)
14881 inst
.instruction
|= inst
.uncond_value
<< 28;
14884 if ((check
& NEON_CHECK_ARCH
)
14885 && !mark_feature_used (&fpu_neon_ext_v1
))
14887 first_error (_(BAD_FPU
));
14891 if ((check
& NEON_CHECK_ARCH8
)
14892 && !mark_feature_used (&fpu_neon_ext_armv8
))
14894 first_error (_(BAD_FPU
));
14902 do_neon_addsub_if_i (void)
14904 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
14907 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14910 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14911 affected if we specify unsigned args. */
14912 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
14915 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14917 V<op> A,B (A is operand 0, B is operand 2)
14922 so handle that case specially. */
14925 neon_exchange_operands (void)
14927 if (inst
.operands
[1].present
)
14929 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
14931 /* Swap operands[1] and operands[2]. */
14932 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
14933 inst
.operands
[1] = inst
.operands
[2];
14934 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
14939 inst
.operands
[1] = inst
.operands
[2];
14940 inst
.operands
[2] = inst
.operands
[0];
14945 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
14947 if (inst
.operands
[2].isreg
)
14950 neon_exchange_operands ();
14951 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
14955 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14956 struct neon_type_el et
= neon_check_type (2, rs
,
14957 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
14959 NEON_ENCODE (IMMED
, inst
);
14960 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14961 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14962 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14963 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14964 inst
.instruction
|= neon_quad (rs
) << 6;
14965 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14966 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14968 neon_dp_fixup (&inst
);
14975 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
14979 do_neon_cmp_inv (void)
14981 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
14987 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and
   the index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
15022 /* Encode multiply / multiply-accumulate scalar instructions. */
15025 neon_mul_mac (struct neon_type_el et
, int ubit
)
15029 /* Give a more helpful error message if we have an invalid type. */
15030 if (et
.type
== NT_invtype
)
15033 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
15034 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15035 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15036 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15037 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15038 inst
.instruction
|= LOW4 (scalar
);
15039 inst
.instruction
|= HI1 (scalar
) << 5;
15040 inst
.instruction
|= (et
.type
== NT_float
) << 8;
15041 inst
.instruction
|= neon_logbits (et
.size
) << 20;
15042 inst
.instruction
|= (ubit
!= 0) << 24;
15044 neon_dp_fixup (&inst
);
15048 do_neon_mac_maybe_scalar (void)
15050 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
15053 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15056 if (inst
.operands
[2].isscalar
)
15058 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15059 struct neon_type_el et
= neon_check_type (3, rs
,
15060 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
15061 NEON_ENCODE (SCALAR
, inst
);
15062 neon_mul_mac (et
, neon_quad (rs
));
15066 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15067 affected if we specify unsigned args. */
15068 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15073 do_neon_fmac (void)
15075 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
15078 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15081 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15087 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15088 struct neon_type_el et
= neon_check_type (3, rs
,
15089 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15090 neon_three_same (neon_quad (rs
), 0, et
.size
);
15093 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15094 same types as the MAC equivalents. The polynomial type for this instruction
15095 is encoded the same as the integer type. */
15100 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
15103 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15106 if (inst
.operands
[2].isscalar
)
15107 do_neon_mac_maybe_scalar ();
15109 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
15113 do_neon_qdmulh (void)
15115 if (inst
.operands
[2].isscalar
)
15117 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15118 struct neon_type_el et
= neon_check_type (3, rs
,
15119 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15120 NEON_ENCODE (SCALAR
, inst
);
15121 neon_mul_mac (et
, neon_quad (rs
));
15125 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15126 struct neon_type_el et
= neon_check_type (3, rs
,
15127 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15128 NEON_ENCODE (INTEGER
, inst
);
15129 /* The U bit (rounding) comes from bit mask. */
15130 neon_three_same (neon_quad (rs
), 0, et
.size
);
15135 do_neon_qrdmlah (void)
15137 /* Check we're on the correct architecture. */
15138 if (!mark_feature_used (&fpu_neon_ext_armv8
))
15140 _("instruction form not available on this architecture.");
15141 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
15143 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15144 record_feature_use (&fpu_neon_ext_v8_1
);
15147 if (inst
.operands
[2].isscalar
)
15149 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15150 struct neon_type_el et
= neon_check_type (3, rs
,
15151 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15152 NEON_ENCODE (SCALAR
, inst
);
15153 neon_mul_mac (et
, neon_quad (rs
));
15157 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15158 struct neon_type_el et
= neon_check_type (3, rs
,
15159 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15160 NEON_ENCODE (INTEGER
, inst
);
15161 /* The U bit (rounding) comes from bit mask. */
15162 neon_three_same (neon_quad (rs
), 0, et
.size
);
15167 do_neon_fcmp_absolute (void)
15169 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15170 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15171 N_F_16_32
| N_KEY
);
15172 /* Size field comes from bit mask. */
15173 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
/* Encode VACLE/VACLT as the inverted compare with swapped operands.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15184 do_neon_step (void)
15186 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15187 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15188 N_F_16_32
| N_KEY
);
15189 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15193 do_neon_abs_neg (void)
15195 enum neon_shape rs
;
15196 struct neon_type_el et
;
15198 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
15201 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15204 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15205 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
15207 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15208 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15209 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15210 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15211 inst
.instruction
|= neon_quad (rs
) << 6;
15212 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15213 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15215 neon_dp_fixup (&inst
);
15221 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15222 struct neon_type_el et
= neon_check_type (2, rs
,
15223 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15224 int imm
= inst
.operands
[2].imm
;
15225 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15226 _("immediate out of range for insert"));
15227 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15233 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15234 struct neon_type_el et
= neon_check_type (2, rs
,
15235 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15236 int imm
= inst
.operands
[2].imm
;
15237 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15238 _("immediate out of range for insert"));
15239 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
15243 do_neon_qshlu_imm (void)
15245 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15246 struct neon_type_el et
= neon_check_type (2, rs
,
15247 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
15248 int imm
= inst
.operands
[2].imm
;
15249 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15250 _("immediate out of range for shift"));
15251 /* Only encodes the 'U present' variant of the instruction.
15252 In this case, signed types have OP (bit 8) set to 0.
15253 Unsigned types have OP set to 1. */
15254 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
15255 /* The rest of the bits are the same as other immediate shifts. */
15256 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15260 do_neon_qmovn (void)
15262 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15263 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15264 /* Saturating move where operands can be signed or unsigned, and the
15265 destination has the same signedness. */
15266 NEON_ENCODE (INTEGER
, inst
);
15267 if (et
.type
== NT_unsigned
)
15268 inst
.instruction
|= 0xc0;
15270 inst
.instruction
|= 0x80;
15271 neon_two_same (0, 1, et
.size
/ 2);
15275 do_neon_qmovun (void)
15277 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15278 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15279 /* Saturating move with unsigned results. Operands must be signed. */
15280 NEON_ENCODE (INTEGER
, inst
);
15281 neon_two_same (0, 1, et
.size
/ 2);
15285 do_neon_rshift_sat_narrow (void)
15287 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15288 or unsigned. If operands are unsigned, results must also be unsigned. */
15289 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15290 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15291 int imm
= inst
.operands
[2].imm
;
15292 /* This gets the bounds check, size encoding and immediate bits calculation
15296 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15297 VQMOVN.I<size> <Dd>, <Qm>. */
15300 inst
.operands
[2].present
= 0;
15301 inst
.instruction
= N_MNEM_vqmovn
;
15306 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15307 _("immediate out of range"));
15308 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15312 do_neon_rshift_sat_narrow_u (void)
15314 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15315 or unsigned. If operands are unsigned, results must also be unsigned. */
15316 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15317 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15318 int imm
= inst
.operands
[2].imm
;
15319 /* This gets the bounds check, size encoding and immediate bits calculation
15323 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15324 VQMOVUN.I<size> <Dd>, <Qm>. */
15327 inst
.operands
[2].present
= 0;
15328 inst
.instruction
= N_MNEM_vqmovun
;
15333 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15334 _("immediate out of range"));
15335 /* FIXME: The manual is kind of unclear about what value U should have in
15336 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15338 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15342 do_neon_movn (void)
15344 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15345 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15346 NEON_ENCODE (INTEGER
, inst
);
15347 neon_two_same (0, 1, et
.size
/ 2);
15351 do_neon_rshift_narrow (void)
15353 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15354 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15355 int imm
= inst
.operands
[2].imm
;
15356 /* This gets the bounds check, size encoding and immediate bits calculation
15360 /* If immediate is zero then we are a pseudo-instruction for
15361 VMOVN.I<size> <Dd>, <Qm> */
15364 inst
.operands
[2].present
= 0;
15365 inst
.instruction
= N_MNEM_vmovn
;
15370 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15371 _("immediate out of range for narrowing operation"));
15372 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15376 do_neon_shll (void)
15378 /* FIXME: Type checking when lengthening. */
15379 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15380 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15381 unsigned imm
= inst
.operands
[2].imm
;
15383 if (imm
== et
.size
)
15385 /* Maximum shift variant. */
15386 NEON_ENCODE (INTEGER
, inst
);
15387 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15388 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15389 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15390 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15391 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15393 neon_dp_fixup (&inst
);
15397 /* A more-specific type check for non-max versions. */
15398 et
= neon_check_type (2, NS_QDI
,
15399 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15400 NEON_ENCODE (IMMED
, inst
);
15401 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  Each CVT_VAR entry gives the flavour name,
   the destination and source type masks, a register-class modifier, and the
   VFP nsyn opcode names for the bitshift / plain / round-to-zero forms.  */

#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor    \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)
15444 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
15445 neon_cvt_flavour_##C,
15447 /* The different types of conversions we can do. */
15448 enum neon_cvt_flavour
15451 neon_cvt_flavour_invalid
,
15452 neon_cvt_flavour_first_fp
= neon_cvt_flavour_f32_f64
15457 static enum neon_cvt_flavour
15458 get_neon_cvt_flavour (enum neon_shape rs
)
15460 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
15461 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
15462 if (et.type != NT_invtype) \
15464 inst.error = NULL; \
15465 return (neon_cvt_flavour_##C); \
15468 struct neon_type_el et
;
15469 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
15470 || rs
== NS_FF
) ? N_VFP
: 0;
15471 /* The instruction versions which take an immediate take one register
15472 argument, which is extended to the width of the full register. Thus the
15473 "source" and "destination" registers must have the same width. Hack that
15474 here by making the size equal to the key (wider, in this case) operand. */
15475 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
15479 return neon_cvt_flavour_invalid
;
15494 /* Neon-syntax VFP conversions. */
15497 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
15499 const char *opname
= 0;
15501 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
15502 || rs
== NS_FHI
|| rs
== NS_HFI
)
15504 /* Conversions with immediate bitshift. */
15505 const char *enc
[] =
15507 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15513 if (flavour
< (int) ARRAY_SIZE (enc
))
15515 opname
= enc
[flavour
];
15516 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15517 _("operands 0 and 1 must be the same register"));
15518 inst
.operands
[1] = inst
.operands
[2];
15519 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
15524 /* Conversions without bitshift. */
15525 const char *enc
[] =
15527 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15533 if (flavour
< (int) ARRAY_SIZE (enc
))
15534 opname
= enc
[flavour
];
15538 do_vfp_nsyn_opcode (opname
);
15540 /* ARMv8.2 fp16 VCVT instruction. */
15541 if (flavour
== neon_cvt_flavour_s32_f16
15542 || flavour
== neon_cvt_flavour_u32_f16
15543 || flavour
== neon_cvt_flavour_f16_u32
15544 || flavour
== neon_cvt_flavour_f16_s32
)
15545 do_scalar_fp16_v82_encode ();
15549 do_vfp_nsyn_cvtz (void)
15551 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
15552 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15553 const char *enc
[] =
15555 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15561 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
15562 do_vfp_nsyn_opcode (enc
[flavour
]);
15566 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
15567 enum neon_cvt_mode mode
)
15572 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15573 D register operands. */
15574 if (flavour
== neon_cvt_flavour_s32_f64
15575 || flavour
== neon_cvt_flavour_u32_f64
)
15576 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15579 if (flavour
== neon_cvt_flavour_s32_f16
15580 || flavour
== neon_cvt_flavour_u32_f16
)
15581 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
15584 set_it_insn_type (OUTSIDE_IT_INSN
);
15588 case neon_cvt_flavour_s32_f64
:
15592 case neon_cvt_flavour_s32_f32
:
15596 case neon_cvt_flavour_s32_f16
:
15600 case neon_cvt_flavour_u32_f64
:
15604 case neon_cvt_flavour_u32_f32
:
15608 case neon_cvt_flavour_u32_f16
:
15613 first_error (_("invalid instruction shape"));
15619 case neon_cvt_mode_a
: rm
= 0; break;
15620 case neon_cvt_mode_n
: rm
= 1; break;
15621 case neon_cvt_mode_p
: rm
= 2; break;
15622 case neon_cvt_mode_m
: rm
= 3; break;
15623 default: first_error (_("invalid rounding mode")); return;
15626 NEON_ENCODE (FPV8
, inst
);
15627 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
15628 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
15629 inst
.instruction
|= sz
<< 8;
15631 /* ARMv8.2 fp16 VCVT instruction. */
15632 if (flavour
== neon_cvt_flavour_s32_f16
15633 ||flavour
== neon_cvt_flavour_u32_f16
)
15634 do_scalar_fp16_v82_encode ();
15635 inst
.instruction
|= op
<< 7;
15636 inst
.instruction
|= rm
<< 16;
15637 inst
.instruction
|= 0xf0000000;
15638 inst
.is_neon
= TRUE
;
15642 do_neon_cvt_1 (enum neon_cvt_mode mode
)
15644 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
15645 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
15646 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
15648 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15650 if (flavour
== neon_cvt_flavour_invalid
)
15653 /* PR11109: Handle round-to-zero for VCVT conversions. */
15654 if (mode
== neon_cvt_mode_z
15655 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
15656 && (flavour
== neon_cvt_flavour_s16_f16
15657 || flavour
== neon_cvt_flavour_u16_f16
15658 || flavour
== neon_cvt_flavour_s32_f32
15659 || flavour
== neon_cvt_flavour_u32_f32
15660 || flavour
== neon_cvt_flavour_s32_f64
15661 || flavour
== neon_cvt_flavour_u32_f64
)
15662 && (rs
== NS_FD
|| rs
== NS_FF
))
15664 do_vfp_nsyn_cvtz ();
15668 /* ARMv8.2 fp16 VCVT conversions. */
15669 if (mode
== neon_cvt_mode_z
15670 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
15671 && (flavour
== neon_cvt_flavour_s32_f16
15672 || flavour
== neon_cvt_flavour_u32_f16
)
15675 do_vfp_nsyn_cvtz ();
15676 do_scalar_fp16_v82_encode ();
15680 /* VFP rather than Neon conversions. */
15681 if (flavour
>= neon_cvt_flavour_first_fp
)
15683 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15684 do_vfp_nsyn_cvt (rs
, flavour
);
15686 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15697 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
15698 0x0000100, 0x1000100, 0x0, 0x1000000};
15700 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15703 /* Fixed-point conversion with #0 immediate is encoded as an
15704 integer conversion. */
15705 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
15707 NEON_ENCODE (IMMED
, inst
);
15708 if (flavour
!= neon_cvt_flavour_invalid
)
15709 inst
.instruction
|= enctab
[flavour
];
15710 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15711 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15712 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15713 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15714 inst
.instruction
|= neon_quad (rs
) << 6;
15715 inst
.instruction
|= 1 << 21;
15716 if (flavour
< neon_cvt_flavour_s16_f16
)
15718 inst
.instruction
|= 1 << 21;
15719 immbits
= 32 - inst
.operands
[2].imm
;
15720 inst
.instruction
|= immbits
<< 16;
15724 inst
.instruction
|= 3 << 20;
15725 immbits
= 16 - inst
.operands
[2].imm
;
15726 inst
.instruction
|= immbits
<< 16;
15727 inst
.instruction
&= ~(1 << 9);
15730 neon_dp_fixup (&inst
);
15736 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
15738 NEON_ENCODE (FLOAT
, inst
);
15739 set_it_insn_type (OUTSIDE_IT_INSN
);
15741 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
15744 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15745 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15746 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15747 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15748 inst
.instruction
|= neon_quad (rs
) << 6;
15749 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
15750 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
15751 inst
.instruction
|= mode
<< 8;
15752 if (flavour
== neon_cvt_flavour_u16_f16
15753 || flavour
== neon_cvt_flavour_s16_f16
)
15754 /* Mask off the original size bits and reencode them. */
15755 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
15758 inst
.instruction
|= 0xfc000000;
15760 inst
.instruction
|= 0xf0000000;
15766 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
15767 0x100, 0x180, 0x0, 0x080};
15769 NEON_ENCODE (INTEGER
, inst
);
15771 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15774 if (flavour
!= neon_cvt_flavour_invalid
)
15775 inst
.instruction
|= enctab
[flavour
];
15777 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15778 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15779 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15780 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15781 inst
.instruction
|= neon_quad (rs
) << 6;
15782 if (flavour
>= neon_cvt_flavour_s16_f16
15783 && flavour
<= neon_cvt_flavour_f16_u16
)
15784 /* Half precision. */
15785 inst
.instruction
|= 1 << 18;
15787 inst
.instruction
|= 2 << 18;
15789 neon_dp_fixup (&inst
);
15794 /* Half-precision conversions for Advanced SIMD -- neon. */
15799 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
15801 as_bad (_("operand size must match register width"));
15806 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
15808 as_bad (_("operand size must match register width"));
15813 inst
.instruction
= 0x3b60600;
15815 inst
.instruction
= 0x3b60700;
15817 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15818 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15819 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15820 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15821 neon_dp_fixup (&inst
);
15825 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15826 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15827 do_vfp_nsyn_cvt (rs
, flavour
);
15829 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15834 do_neon_cvtr (void)
15836 do_neon_cvt_1 (neon_cvt_mode_x
);
15842 do_neon_cvt_1 (neon_cvt_mode_z
);
15846 do_neon_cvta (void)
15848 do_neon_cvt_1 (neon_cvt_mode_a
);
15852 do_neon_cvtn (void)
15854 do_neon_cvt_1 (neon_cvt_mode_n
);
15858 do_neon_cvtp (void)
15860 do_neon_cvt_1 (neon_cvt_mode_p
);
15864 do_neon_cvtm (void)
15866 do_neon_cvt_1 (neon_cvt_mode_m
);
15870 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
15873 mark_feature_used (&fpu_vfp_ext_armv8
);
15875 encode_arm_vfp_reg (inst
.operands
[0].reg
,
15876 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
15877 encode_arm_vfp_reg (inst
.operands
[1].reg
,
15878 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
15879 inst
.instruction
|= to
? 0x10000 : 0;
15880 inst
.instruction
|= t
? 0x80 : 0;
15881 inst
.instruction
|= is_double
? 0x100 : 0;
15882 do_vfp_cond_or_thumb ();
15886 do_neon_cvttb_1 (bfd_boolean t
)
15888 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
15889 NS_DF
, NS_DH
, NS_NULL
);
15893 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
15896 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
15898 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
15901 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
15903 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
15905 /* The VCVTB and VCVTT instructions with D-register operands
15906 don't work for SP only targets. */
15907 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15911 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
15913 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
15915 /* The VCVTB and VCVTT instructions with D-register operands
15916 don't work for SP only targets. */
15917 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15921 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
15928 do_neon_cvtb (void)
15930 do_neon_cvttb_1 (FALSE
);
15935 do_neon_cvtt (void)
15937 do_neon_cvttb_1 (TRUE
);
15941 neon_move_immediate (void)
15943 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
15944 struct neon_type_el et
= neon_check_type (2, rs
,
15945 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15946 unsigned immlo
, immhi
= 0, immbits
;
15947 int op
, cmode
, float_p
;
15949 constraint (et
.type
== NT_invtype
,
15950 _("operand size must be specified for immediate VMOV"));
15952 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
15953 op
= (inst
.instruction
& (1 << 5)) != 0;
15955 immlo
= inst
.operands
[1].imm
;
15956 if (inst
.operands
[1].regisimm
)
15957 immhi
= inst
.operands
[1].reg
;
15959 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
15960 _("immediate has bits set outside the operand size"));
15962 float_p
= inst
.operands
[1].immisfloat
;
15964 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
15965 et
.size
, et
.type
)) == FAIL
)
15967 /* Invert relevant bits only. */
15968 neon_invert_size (&immlo
, &immhi
, et
.size
);
15969 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15970 with one or the other; those cases are caught by
15971 neon_cmode_for_move_imm. */
15973 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
15974 &op
, et
.size
, et
.type
)) == FAIL
)
15976 first_error (_("immediate out of range"));
15981 inst
.instruction
&= ~(1 << 5);
15982 inst
.instruction
|= op
<< 5;
15984 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15985 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15986 inst
.instruction
|= neon_quad (rs
) << 6;
15987 inst
.instruction
|= cmode
<< 8;
15989 neon_write_immbits (immbits
);
15995 if (inst
.operands
[1].isreg
)
15997 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15999 NEON_ENCODE (INTEGER
, inst
);
16000 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16001 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16002 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16003 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16004 inst
.instruction
|= neon_quad (rs
) << 6;
16008 NEON_ENCODE (IMMED
, inst
);
16009 neon_move_immediate ();
16012 neon_dp_fixup (&inst
);
16015 /* Encode instructions of form:
16017 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16018 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16021 neon_mixed_length (struct neon_type_el et
, unsigned size
)
16023 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16024 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16025 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16026 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16027 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16028 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16029 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
16030 inst
.instruction
|= neon_logbits (size
) << 20;
16032 neon_dp_fixup (&inst
);
16036 do_neon_dyadic_long (void)
16038 /* FIXME: Type checking for lengthening op. */
16039 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16040 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16041 neon_mixed_length (et
, et
.size
);
16045 do_neon_abal (void)
16047 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16048 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16049 neon_mixed_length (et
, et
.size
);
16053 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
16055 if (inst
.operands
[2].isscalar
)
16057 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
16058 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
16059 NEON_ENCODE (SCALAR
, inst
);
16060 neon_mul_mac (et
, et
.type
== NT_unsigned
);
16064 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16065 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
16066 NEON_ENCODE (INTEGER
, inst
);
16067 neon_mixed_length (et
, et
.size
);
16072 do_neon_mac_maybe_scalar_long (void)
16074 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
16078 do_neon_dyadic_wide (void)
16080 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
16081 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16082 neon_mixed_length (et
, et
.size
);
16086 do_neon_dyadic_narrow (void)
16088 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16089 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
16090 /* Operand sign is unimportant, and the U bit is part of the opcode,
16091 so force the operand type to integer. */
16092 et
.type
= NT_integer
;
16093 neon_mixed_length (et
, et
.size
/ 2);
16097 do_neon_mul_sat_scalar_long (void)
16099 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
16103 do_neon_vmull (void)
16105 if (inst
.operands
[2].isscalar
)
16106 do_neon_mac_maybe_scalar_long ();
16109 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16110 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
16112 if (et
.type
== NT_poly
)
16113 NEON_ENCODE (POLY
, inst
);
16115 NEON_ENCODE (INTEGER
, inst
);
16117 /* For polynomial encoding the U bit must be zero, and the size must
16118 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16119 obviously, as 0b10). */
16122 /* Check we're on the correct architecture. */
16123 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
16125 _("Instruction form not available on this architecture.");
16130 neon_mixed_length (et
, et
.size
);
16137 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
16138 struct neon_type_el et
= neon_check_type (3, rs
,
16139 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16140 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
16142 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
16143 _("shift out of range"));
16144 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16145 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16146 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16147 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16148 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16149 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16150 inst
.instruction
|= neon_quad (rs
) << 6;
16151 inst
.instruction
|= imm
<< 8;
16153 neon_dp_fixup (&inst
);
16159 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16160 struct neon_type_el et
= neon_check_type (2, rs
,
16161 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16162 unsigned op
= (inst
.instruction
>> 7) & 3;
16163 /* N (width of reversed regions) is encoded as part of the bitmask. We
16164 extract it here to check the elements to be reversed are smaller.
16165 Otherwise we'd get a reserved instruction. */
16166 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
16167 gas_assert (elsize
!= 0);
16168 constraint (et
.size
>= elsize
,
16169 _("elements must be smaller than reversal region"));
16170 neon_two_same (neon_quad (rs
), 1, et
.size
);
16176 if (inst
.operands
[1].isscalar
)
16178 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
16179 struct neon_type_el et
= neon_check_type (2, rs
,
16180 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16181 unsigned sizebits
= et
.size
>> 3;
16182 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16183 int logsize
= neon_logbits (et
.size
);
16184 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
16186 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
16189 NEON_ENCODE (SCALAR
, inst
);
16190 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16191 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16192 inst
.instruction
|= LOW4 (dm
);
16193 inst
.instruction
|= HI1 (dm
) << 5;
16194 inst
.instruction
|= neon_quad (rs
) << 6;
16195 inst
.instruction
|= x
<< 17;
16196 inst
.instruction
|= sizebits
<< 16;
16198 neon_dp_fixup (&inst
);
16202 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
16203 struct neon_type_el et
= neon_check_type (2, rs
,
16204 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16205 /* Duplicate ARM register to lanes of vector. */
16206 NEON_ENCODE (ARMREG
, inst
);
16209 case 8: inst
.instruction
|= 0x400000; break;
16210 case 16: inst
.instruction
|= 0x000020; break;
16211 case 32: inst
.instruction
|= 0x000000; break;
16214 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16215 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
16216 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
16217 inst
.instruction
|= neon_quad (rs
) << 21;
16218 /* The encoding for this instruction is identical for the ARM and Thumb
16219 variants, except for the condition field. */
16220 do_vfp_cond_or_thumb ();
16224 /* VMOV has particularly many variations. It can be one of:
16225 0. VMOV<c><q> <Qd>, <Qm>
16226 1. VMOV<c><q> <Dd>, <Dm>
16227 (Register operations, which are VORR with Rm = Rn.)
16228 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16229 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16231 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16232 (ARM register to scalar.)
16233 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16234 (Two ARM registers to vector.)
16235 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16236 (Scalar to ARM register.)
16237 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16238 (Vector to two ARM registers.)
16239 8. VMOV.F32 <Sd>, <Sm>
16240 9. VMOV.F64 <Dd>, <Dm>
16241 (VFP register moves.)
16242 10. VMOV.F32 <Sd>, #imm
16243 11. VMOV.F64 <Dd>, #imm
16244 (VFP float immediate load.)
16245 12. VMOV <Rd>, <Sm>
16246 (VFP single to ARM reg.)
16247 13. VMOV <Sd>, <Rm>
16248 (ARM reg to VFP single.)
16249 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16250 (Two ARM regs to two VFP singles.)
16251 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16252 (Two VFP singles to two ARM regs.)
16254 These cases can be disambiguated using neon_select_shape, except cases 1/9
16255 and 3/11 which depend on the operand type too.
16257 All the encoded bits are hardcoded by this function.
16259 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16260 Cases 5, 7 may be used with VFPv2 and above.
16262 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16263 can specify a type where it doesn't make sense to, and is ignored). */
16268 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
16269 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
16270 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
16271 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
16272 struct neon_type_el et
;
16273 const char *ldconst
= 0;
16277 case NS_DD
: /* case 1/9. */
16278 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16279 /* It is not an error here if no type is given. */
16281 if (et
.type
== NT_float
&& et
.size
== 64)
16283 do_vfp_nsyn_opcode ("fcpyd");
16286 /* fall through. */
16288 case NS_QQ
: /* case 0/1. */
16290 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16292 /* The architecture manual I have doesn't explicitly state which
16293 value the U bit should have for register->register moves, but
16294 the equivalent VORR instruction has U = 0, so do that. */
16295 inst
.instruction
= 0x0200110;
16296 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16297 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16298 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16299 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16300 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16301 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16302 inst
.instruction
|= neon_quad (rs
) << 6;
16304 neon_dp_fixup (&inst
);
16308 case NS_DI
: /* case 3/11. */
16309 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16311 if (et
.type
== NT_float
&& et
.size
== 64)
16313 /* case 11 (fconstd). */
16314 ldconst
= "fconstd";
16315 goto encode_fconstd
;
16317 /* fall through. */
16319 case NS_QI
: /* case 2/3. */
16320 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16322 inst
.instruction
= 0x0800010;
16323 neon_move_immediate ();
16324 neon_dp_fixup (&inst
);
16327 case NS_SR
: /* case 4. */
16329 unsigned bcdebits
= 0;
16331 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
16332 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
16334 /* .<size> is optional here, defaulting to .32. */
16335 if (inst
.vectype
.elems
== 0
16336 && inst
.operands
[0].vectype
.type
== NT_invtype
16337 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16339 inst
.vectype
.el
[0].type
= NT_untyped
;
16340 inst
.vectype
.el
[0].size
= 32;
16341 inst
.vectype
.elems
= 1;
16344 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16345 logsize
= neon_logbits (et
.size
);
16347 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16349 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16350 && et
.size
!= 32, _(BAD_FPU
));
16351 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16352 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16356 case 8: bcdebits
= 0x8; break;
16357 case 16: bcdebits
= 0x1; break;
16358 case 32: bcdebits
= 0x0; break;
16362 bcdebits
|= x
<< logsize
;
16364 inst
.instruction
= 0xe000b10;
16365 do_vfp_cond_or_thumb ();
16366 inst
.instruction
|= LOW4 (dn
) << 16;
16367 inst
.instruction
|= HI1 (dn
) << 7;
16368 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16369 inst
.instruction
|= (bcdebits
& 3) << 5;
16370 inst
.instruction
|= (bcdebits
>> 2) << 21;
16374 case NS_DRR
: /* case 5 (fmdrr). */
16375 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16378 inst
.instruction
= 0xc400b10;
16379 do_vfp_cond_or_thumb ();
16380 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
16381 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
16382 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16383 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
16386 case NS_RS
: /* case 6. */
16389 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16390 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
16391 unsigned abcdebits
= 0;
16393 /* .<dt> is optional here, defaulting to .32. */
16394 if (inst
.vectype
.elems
== 0
16395 && inst
.operands
[0].vectype
.type
== NT_invtype
16396 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16398 inst
.vectype
.el
[0].type
= NT_untyped
;
16399 inst
.vectype
.el
[0].size
= 32;
16400 inst
.vectype
.elems
= 1;
16403 et
= neon_check_type (2, NS_NULL
,
16404 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
16405 logsize
= neon_logbits (et
.size
);
16407 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16409 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16410 && et
.size
!= 32, _(BAD_FPU
));
16411 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16412 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16416 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
16417 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
16418 case 32: abcdebits
= 0x00; break;
16422 abcdebits
|= x
<< logsize
;
16423 inst
.instruction
= 0xe100b10;
16424 do_vfp_cond_or_thumb ();
16425 inst
.instruction
|= LOW4 (dn
) << 16;
16426 inst
.instruction
|= HI1 (dn
) << 7;
16427 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16428 inst
.instruction
|= (abcdebits
& 3) << 5;
16429 inst
.instruction
|= (abcdebits
>> 2) << 21;
16433 case NS_RRD
: /* case 7 (fmrrd). */
16434 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16437 inst
.instruction
= 0xc500b10;
16438 do_vfp_cond_or_thumb ();
16439 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16440 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16441 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16442 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16445 case NS_FF
: /* case 8 (fcpys). */
16446 do_vfp_nsyn_opcode ("fcpys");
16450 case NS_FI
: /* case 10 (fconsts). */
16451 ldconst
= "fconsts";
16453 if (is_quarter_float (inst
.operands
[1].imm
))
16455 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
16456 do_vfp_nsyn_opcode (ldconst
);
16458 /* ARMv8.2 fp16 vmov.f16 instruction. */
16460 do_scalar_fp16_v82_encode ();
16463 first_error (_("immediate out of range"));
16467 case NS_RF
: /* case 12 (fmrs). */
16468 do_vfp_nsyn_opcode ("fmrs");
16469 /* ARMv8.2 fp16 vmov.f16 instruction. */
16471 do_scalar_fp16_v82_encode ();
16475 case NS_FR
: /* case 13 (fmsr). */
16476 do_vfp_nsyn_opcode ("fmsr");
16477 /* ARMv8.2 fp16 vmov.f16 instruction. */
16479 do_scalar_fp16_v82_encode ();
16482 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16483 (one of which is a list), but we have parsed four. Do some fiddling to
16484 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16486 case NS_RRFF
: /* case 14 (fmrrs). */
16487 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
16488 _("VFP registers must be adjacent"));
16489 inst
.operands
[2].imm
= 2;
16490 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16491 do_vfp_nsyn_opcode ("fmrrs");
16494 case NS_FFRR
: /* case 15 (fmsrr). */
16495 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
16496 _("VFP registers must be adjacent"));
16497 inst
.operands
[1] = inst
.operands
[2];
16498 inst
.operands
[2] = inst
.operands
[3];
16499 inst
.operands
[0].imm
= 2;
16500 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16501 do_vfp_nsyn_opcode ("fmsrr");
16505 /* neon_select_shape has determined that the instruction
16506 shape is wrong and has already set the error message. */
16515 do_neon_rshift_round_imm (void)
16517 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16518 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
16519 int imm
= inst
.operands
[2].imm
;
16521 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16524 inst
.operands
[2].present
= 0;
16529 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16530 _("immediate out of range for shift"));
16531 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
16536 do_neon_movhf (void)
16538 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
16539 constraint (rs
!= NS_HH
, _("invalid suffix"));
16541 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16544 do_vfp_sp_monadic ();
16547 inst
.instruction
|= 0xf0000000;
16551 do_neon_movl (void)
16553 struct neon_type_el et
= neon_check_type (2, NS_QD
,
16554 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16555 unsigned sizebits
= et
.size
>> 3;
16556 inst
.instruction
|= sizebits
<< 19;
16557 neon_two_same (0, et
.type
== NT_unsigned
, -1);
16563 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16564 struct neon_type_el et
= neon_check_type (2, rs
,
16565 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16566 NEON_ENCODE (INTEGER
, inst
);
16567 neon_two_same (neon_quad (rs
), 1, et
.size
);
16571 do_neon_zip_uzp (void)
16573 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16574 struct neon_type_el et
= neon_check_type (2, rs
,
16575 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16576 if (rs
== NS_DD
&& et
.size
== 32)
16578 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16579 inst
.instruction
= N_MNEM_vtrn
;
16583 neon_two_same (neon_quad (rs
), 1, et
.size
);
16587 do_neon_sat_abs_neg (void)
16589 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16590 struct neon_type_el et
= neon_check_type (2, rs
,
16591 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16592 neon_two_same (neon_quad (rs
), 1, et
.size
);
16596 do_neon_pair_long (void)
16598 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16599 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
16600 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16601 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
16602 neon_two_same (neon_quad (rs
), 1, et
.size
);
16606 do_neon_recip_est (void)
16608 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16609 struct neon_type_el et
= neon_check_type (2, rs
,
16610 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
16611 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16612 neon_two_same (neon_quad (rs
), 1, et
.size
);
16618 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16619 struct neon_type_el et
= neon_check_type (2, rs
,
16620 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16621 neon_two_same (neon_quad (rs
), 1, et
.size
);
16627 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16628 struct neon_type_el et
= neon_check_type (2, rs
,
16629 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
16630 neon_two_same (neon_quad (rs
), 1, et
.size
);
16636 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16637 struct neon_type_el et
= neon_check_type (2, rs
,
16638 N_EQK
| N_INT
, N_8
| N_KEY
);
16639 neon_two_same (neon_quad (rs
), 1, et
.size
);
16645 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16646 neon_two_same (neon_quad (rs
), 1, -1);
16650 do_neon_tbl_tbx (void)
16652 unsigned listlenbits
;
16653 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
16655 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
16657 first_error (_("bad list length for table lookup"));
16661 listlenbits
= inst
.operands
[1].imm
- 1;
16662 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16663 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16664 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16665 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16666 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16667 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16668 inst
.instruction
|= listlenbits
<< 8;
16670 neon_dp_fixup (&inst
);
16674 do_neon_ldm_stm (void)
16676 /* P, U and L bits are part of bitmask. */
16677 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
16678 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
16680 if (inst
.operands
[1].issingle
)
16682 do_vfp_nsyn_ldm_stm (is_dbmode
);
16686 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
16687 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16689 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
16690 _("register list must contain at least 1 and at most 16 "
16693 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
16694 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
16695 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16696 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
16698 inst
.instruction
|= offsetbits
;
16700 do_vfp_cond_or_thumb ();
16704 do_neon_ldr_str (void)
16706 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
16708 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16709 And is UNPREDICTABLE in thumb mode. */
16711 && inst
.operands
[1].reg
== REG_PC
16712 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
16715 inst
.error
= _("Use of PC here is UNPREDICTABLE");
16716 else if (warn_on_deprecated
)
16717 as_tsktsk (_("Use of PC here is deprecated"));
16720 if (inst
.operands
[0].issingle
)
16723 do_vfp_nsyn_opcode ("flds");
16725 do_vfp_nsyn_opcode ("fsts");
16727 /* ARMv8.2 vldr.16/vstr.16 instruction. */
16728 if (inst
.vectype
.el
[0].size
== 16)
16729 do_scalar_fp16_v82_encode ();
16734 do_vfp_nsyn_opcode ("fldd");
16736 do_vfp_nsyn_opcode ("fstd");
16740 /* "interleave" version also handles non-interleaving register VLD1/VST1
16744 do_neon_ld_st_interleave (void)
16746 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
16747 N_8
| N_16
| N_32
| N_64
);
16748 unsigned alignbits
= 0;
16750 /* The bits in this table go:
16751 0: register stride of one (0) or two (1)
16752 1,2: register list length, minus one (1, 2, 3, 4).
16753 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16754 We use -1 for invalid entries. */
16755 const int typetable
[] =
16757 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16758 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16759 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16760 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16764 if (et
.type
== NT_invtype
)
16767 if (inst
.operands
[1].immisalign
)
16768 switch (inst
.operands
[1].imm
>> 8)
16770 case 64: alignbits
= 1; break;
16772 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
16773 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16774 goto bad_alignment
;
16778 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16779 goto bad_alignment
;
16784 first_error (_("bad alignment"));
16788 inst
.instruction
|= alignbits
<< 4;
16789 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16791 /* Bits [4:6] of the immediate in a list specifier encode register stride
16792 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16793 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16794 up the right value for "type" in a table based on this value and the given
16795 list style, then stick it back. */
16796 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
16797 | (((inst
.instruction
>> 8) & 3) << 3);
16799 typebits
= typetable
[idx
];
16801 constraint (typebits
== -1, _("bad list type for instruction"));
16802 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
16803 _("bad element type for instruction"));
16805 inst
.instruction
&= ~0xf00;
16806 inst
.instruction
|= typebits
<< 8;
16809 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16810 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16811 otherwise. The variable arguments are a list of pairs of legal (size, align)
16812 values, terminated with -1. */
16815 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
16818 int result
= FAIL
, thissize
, thisalign
;
16820 if (!inst
.operands
[1].immisalign
)
16826 va_start (ap
, do_alignment
);
16830 thissize
= va_arg (ap
, int);
16831 if (thissize
== -1)
16833 thisalign
= va_arg (ap
, int);
16835 if (size
== thissize
&& align
== thisalign
)
16838 while (result
!= SUCCESS
);
16842 if (result
== SUCCESS
)
16845 first_error (_("unsupported alignment for instruction"));
16851 do_neon_ld_st_lane (void)
16853 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16854 int align_good
, do_alignment
= 0;
16855 int logsize
= neon_logbits (et
.size
);
16856 int align
= inst
.operands
[1].imm
>> 8;
16857 int n
= (inst
.instruction
>> 8) & 3;
16858 int max_el
= 64 / et
.size
;
16860 if (et
.type
== NT_invtype
)
16863 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
16864 _("bad list length"));
16865 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
16866 _("scalar index out of range"));
16867 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
16869 _("stride of 2 unavailable when element size is 8"));
16873 case 0: /* VLD1 / VST1. */
16874 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
16876 if (align_good
== FAIL
)
16880 unsigned alignbits
= 0;
16883 case 16: alignbits
= 0x1; break;
16884 case 32: alignbits
= 0x3; break;
16887 inst
.instruction
|= alignbits
<< 4;
16891 case 1: /* VLD2 / VST2. */
16892 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
16893 16, 32, 32, 64, -1);
16894 if (align_good
== FAIL
)
16897 inst
.instruction
|= 1 << 4;
16900 case 2: /* VLD3 / VST3. */
16901 constraint (inst
.operands
[1].immisalign
,
16902 _("can't use alignment with this instruction"));
16905 case 3: /* VLD4 / VST4. */
16906 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
16907 16, 64, 32, 64, 32, 128, -1);
16908 if (align_good
== FAIL
)
16912 unsigned alignbits
= 0;
16915 case 8: alignbits
= 0x1; break;
16916 case 16: alignbits
= 0x1; break;
16917 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
16920 inst
.instruction
|= alignbits
<< 4;
16927 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16928 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16929 inst
.instruction
|= 1 << (4 + logsize
);
16931 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
16932 inst
.instruction
|= logsize
<< 10;
16935 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16938 do_neon_ld_dup (void)
16940 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16941 int align_good
, do_alignment
= 0;
16943 if (et
.type
== NT_invtype
)
16946 switch ((inst
.instruction
>> 8) & 3)
16948 case 0: /* VLD1. */
16949 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
16950 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16951 &do_alignment
, 16, 16, 32, 32, -1);
16952 if (align_good
== FAIL
)
16954 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
16957 case 2: inst
.instruction
|= 1 << 5; break;
16958 default: first_error (_("bad list length")); return;
16960 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16963 case 1: /* VLD2. */
16964 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16965 &do_alignment
, 8, 16, 16, 32, 32, 64,
16967 if (align_good
== FAIL
)
16969 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
16970 _("bad list length"));
16971 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16972 inst
.instruction
|= 1 << 5;
16973 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16976 case 2: /* VLD3. */
16977 constraint (inst
.operands
[1].immisalign
,
16978 _("can't use alignment with this instruction"));
16979 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
16980 _("bad list length"));
16981 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16982 inst
.instruction
|= 1 << 5;
16983 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16986 case 3: /* VLD4. */
16988 int align
= inst
.operands
[1].imm
>> 8;
16989 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
16990 16, 64, 32, 64, 32, 128, -1);
16991 if (align_good
== FAIL
)
16993 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
16994 _("bad list length"));
16995 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16996 inst
.instruction
|= 1 << 5;
16997 if (et
.size
== 32 && align
== 128)
16998 inst
.instruction
|= 0x3 << 6;
17000 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17007 inst
.instruction
|= do_alignment
<< 4;
17010 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17011 apart from bits [11:4]. */
17014 do_neon_ldx_stx (void)
17016 if (inst
.operands
[1].isreg
)
17017 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
17019 switch (NEON_LANE (inst
.operands
[0].imm
))
17021 case NEON_INTERLEAVE_LANES
:
17022 NEON_ENCODE (INTERLV
, inst
);
17023 do_neon_ld_st_interleave ();
17026 case NEON_ALL_LANES
:
17027 NEON_ENCODE (DUP
, inst
);
17028 if (inst
.instruction
== N_INV
)
17030 first_error ("only loads support such operands");
17037 NEON_ENCODE (LANE
, inst
);
17038 do_neon_ld_st_lane ();
17041 /* L bit comes from bit mask. */
17042 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17043 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17044 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17046 if (inst
.operands
[1].postind
)
17048 int postreg
= inst
.operands
[1].imm
& 0xf;
17049 constraint (!inst
.operands
[1].immisreg
,
17050 _("post-index must be a register"));
17051 constraint (postreg
== 0xd || postreg
== 0xf,
17052 _("bad register for post-index"));
17053 inst
.instruction
|= postreg
;
17057 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
17058 constraint (inst
.reloc
.exp
.X_op
!= O_constant
17059 || inst
.reloc
.exp
.X_add_number
!= 0,
17062 if (inst
.operands
[1].writeback
)
17064 inst
.instruction
|= 0xd;
17067 inst
.instruction
|= 0xf;
17071 inst
.instruction
|= 0xf9000000;
17073 inst
.instruction
|= 0xf4000000;
17078 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
17080 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17081 D register operands. */
17082 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17083 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17086 NEON_ENCODE (FPV8
, inst
);
17088 if (rs
== NS_FFF
|| rs
== NS_HHH
)
17090 do_vfp_sp_dyadic ();
17092 /* ARMv8.2 fp16 instruction. */
17094 do_scalar_fp16_v82_encode ();
17097 do_vfp_dp_rd_rn_rm ();
17100 inst
.instruction
|= 0x100;
17102 inst
.instruction
|= 0xf0000000;
17108 set_it_insn_type (OUTSIDE_IT_INSN
);
17110 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
17111 first_error (_("invalid instruction shape"));
17117 set_it_insn_type (OUTSIDE_IT_INSN
);
17119 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
17122 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17125 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
17129 do_vrint_1 (enum neon_cvt_mode mode
)
17131 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
17132 struct neon_type_el et
;
17137 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17138 D register operands. */
17139 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17140 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17143 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
17145 if (et
.type
!= NT_invtype
)
17147 /* VFP encodings. */
17148 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
17149 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
17150 set_it_insn_type (OUTSIDE_IT_INSN
);
17152 NEON_ENCODE (FPV8
, inst
);
17153 if (rs
== NS_FF
|| rs
== NS_HH
)
17154 do_vfp_sp_monadic ();
17156 do_vfp_dp_rd_rm ();
17160 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
17161 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
17162 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
17163 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
17164 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
17165 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
17166 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
17170 inst
.instruction
|= (rs
== NS_DD
) << 8;
17171 do_vfp_cond_or_thumb ();
17173 /* ARMv8.2 fp16 vrint instruction. */
17175 do_scalar_fp16_v82_encode ();
17179 /* Neon encodings (or something broken...). */
17181 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
17183 if (et
.type
== NT_invtype
)
17186 set_it_insn_type (OUTSIDE_IT_INSN
);
17187 NEON_ENCODE (FLOAT
, inst
);
17189 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17192 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17193 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17194 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17195 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17196 inst
.instruction
|= neon_quad (rs
) << 6;
17197 /* Mask off the original size bits and reencode them. */
17198 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
17199 | neon_logbits (et
.size
) << 18);
17203 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
17204 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
17205 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
17206 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
17207 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
17208 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
17209 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
17214 inst
.instruction
|= 0xfc000000;
17216 inst
.instruction
|= 0xf0000000;
17223 do_vrint_1 (neon_cvt_mode_x
);
17229 do_vrint_1 (neon_cvt_mode_z
);
17235 do_vrint_1 (neon_cvt_mode_r
);
17241 do_vrint_1 (neon_cvt_mode_a
);
17247 do_vrint_1 (neon_cvt_mode_n
);
17253 do_vrint_1 (neon_cvt_mode_p
);
17259 do_vrint_1 (neon_cvt_mode_m
);
17262 /* Crypto v1 instructions. */
17264 do_crypto_2op_1 (unsigned elttype
, int op
)
17266 set_it_insn_type (OUTSIDE_IT_INSN
);
17268 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
17274 NEON_ENCODE (INTEGER
, inst
);
17275 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17276 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17277 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17278 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17280 inst
.instruction
|= op
<< 6;
17283 inst
.instruction
|= 0xfc000000;
17285 inst
.instruction
|= 0xf0000000;
17289 do_crypto_3op_1 (int u
, int op
)
17291 set_it_insn_type (OUTSIDE_IT_INSN
);
17293 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
17294 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
17299 NEON_ENCODE (INTEGER
, inst
);
17300 neon_three_same (1, u
, 8 << op
);
17306 do_crypto_2op_1 (N_8
, 0);
17312 do_crypto_2op_1 (N_8
, 1);
17318 do_crypto_2op_1 (N_8
, 2);
17324 do_crypto_2op_1 (N_8
, 3);
17330 do_crypto_3op_1 (0, 0);
17336 do_crypto_3op_1 (0, 1);
17342 do_crypto_3op_1 (0, 2);
17348 do_crypto_3op_1 (0, 3);
17354 do_crypto_3op_1 (1, 0);
17360 do_crypto_3op_1 (1, 1);
/* SHA256SU1: three-operand crypto op with u == 1, op == 2.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
17372 do_crypto_2op_1 (N_32
, -1);
17378 do_crypto_2op_1 (N_32
, 0);
17382 do_sha256su0 (void)
17384 do_crypto_2op_1 (N_32
, 1);
17388 do_crc32_1 (unsigned int poly
, unsigned int sz
)
17390 unsigned int Rd
= inst
.operands
[0].reg
;
17391 unsigned int Rn
= inst
.operands
[1].reg
;
17392 unsigned int Rm
= inst
.operands
[2].reg
;
17394 set_it_insn_type (OUTSIDE_IT_INSN
);
17395 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
17396 inst
.instruction
|= LOW4 (Rn
) << 16;
17397 inst
.instruction
|= LOW4 (Rm
);
17398 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
17399 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
17401 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
17402 as_warn (UNPRED_REG ("r15"));
17403 if (thumb_mode
&& (Rd
== REG_SP
|| Rn
== REG_SP
|| Rm
== REG_SP
))
17404 as_warn (UNPRED_REG ("r13"));
17444 /* Overall per-instruction processing. */
17446 /* We need to be able to fix up arbitrary expressions in some statements.
17447 This is so that we can handle symbols that are an arbitrary distance from
17448 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17449 which returns part of an address in a form which will be valid for
17450 a data instruction. We do this by pushing the expression into a symbol
17451 in the expr_section, and creating a fix for that. */
17454 fix_new_arm (fragS
* frag
,
17468 /* Create an absolute valued symbol, so we have something to
17469 refer to in the object file. Unfortunately for us, gas's
17470 generic expression parsing will already have folded out
17471 any use of .set foo/.type foo %function that may have
17472 been used to set type information of the target location,
17473 that's being specified symbolically. We have to presume
17474 the user knows what they are doing. */
17478 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
17480 symbol
= symbol_find_or_make (name
);
17481 S_SET_SEGMENT (symbol
, absolute_section
);
17482 symbol_set_frag (symbol
, &zero_address_frag
);
17483 S_SET_VALUE (symbol
, exp
->X_add_number
);
17484 exp
->X_op
= O_symbol
;
17485 exp
->X_add_symbol
= symbol
;
17486 exp
->X_add_number
= 0;
17492 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
17493 (enum bfd_reloc_code_real
) reloc
);
17497 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
17498 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
17502 /* Mark whether the fix is to a THUMB instruction, or an ARM
17504 new_fix
->tc_fix_data
= thumb_mode
;
17507 /* Create a frg for an instruction requiring relaxation. */
17509 output_relax_insn (void)
17515 /* The size of the instruction is unknown, so tie the debug info to the
17516 start of the instruction. */
17517 dwarf2_emit_insn (0);
17519 switch (inst
.reloc
.exp
.X_op
)
17522 sym
= inst
.reloc
.exp
.X_add_symbol
;
17523 offset
= inst
.reloc
.exp
.X_add_number
;
17527 offset
= inst
.reloc
.exp
.X_add_number
;
17530 sym
= make_expr_symbol (&inst
.reloc
.exp
);
17534 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
17535 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
17536 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
17539 /* Write a 32-bit thumb instruction to buf. */
17541 put_thumb32_insn (char * buf
, unsigned long insn
)
17543 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
17544 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
17548 output_inst (const char * str
)
17554 as_bad ("%s -- `%s'", inst
.error
, str
);
17559 output_relax_insn ();
17562 if (inst
.size
== 0)
17565 to
= frag_more (inst
.size
);
17566 /* PR 9814: Record the thumb mode into the current frag so that we know
17567 what type of NOP padding to use, if necessary. We override any previous
17568 setting so that if the mode has changed then the NOPS that we use will
17569 match the encoding of the last instruction in the frag. */
17570 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
17572 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
17574 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
17575 put_thumb32_insn (to
, inst
.instruction
);
17577 else if (inst
.size
> INSN_SIZE
)
17579 gas_assert (inst
.size
== (2 * INSN_SIZE
));
17580 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
17581 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
17584 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
17586 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
17587 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
17588 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
17591 dwarf2_emit_insn (inst
.size
);
17595 output_it_inst (int cond
, int mask
, char * to
)
17597 unsigned long instruction
= 0xbf00;
17600 instruction
|= mask
;
17601 instruction
|= cond
<< 4;
17605 to
= frag_more (2);
17607 dwarf2_emit_insn (2);
17611 md_number_to_chars (to
, instruction
, 2);
17616 /* Tag values used in struct asm_opcode's tag field. */
17619 OT_unconditional
, /* Instruction cannot be conditionalized.
17620 The ARM condition field is still 0xE. */
17621 OT_unconditionalF
, /* Instruction cannot be conditionalized
17622 and carries 0xF in its ARM condition field. */
17623 OT_csuffix
, /* Instruction takes a conditional suffix. */
17624 OT_csuffixF
, /* Some forms of the instruction take a conditional
17625 suffix, others place 0xF where the condition field
17627 OT_cinfix3
, /* Instruction takes a conditional infix,
17628 beginning at character index 3. (In
17629 unified mode, it becomes a suffix.) */
17630 OT_cinfix3_deprecated
, /* The same as OT_cinfix3. This is used for
17631 tsts, cmps, cmns, and teqs. */
17632 OT_cinfix3_legacy
, /* Legacy instruction takes a conditional infix at
17633 character index 3, even in unified mode. Used for
17634 legacy instructions where suffix and infix forms
17635 may be ambiguous. */
17636 OT_csuf_or_in3
, /* Instruction takes either a conditional
17637 suffix or an infix at character index 3. */
17638 OT_odd_infix_unc
, /* This is the unconditional variant of an
17639 instruction that takes a conditional infix
17640 at an unusual position. In unified mode,
17641 this variant will accept a suffix. */
17642 OT_odd_infix_0
/* Values greater than or equal to OT_odd_infix_0
17643 are the conditional variants of instructions that
17644 take conditional infixes in unusual positions.
17645 The infix appears at character index
17646 (tag - OT_odd_infix_0). These are not accepted
17647 in unified mode. */
17650 /* Subroutine of md_assemble, responsible for looking up the primary
17651 opcode from the mnemonic the user wrote. STR points to the
17652 beginning of the mnemonic.
17654 This is not simply a hash table lookup, because of conditional
17655 variants. Most instructions have conditional variants, which are
17656 expressed with a _conditional affix_ to the mnemonic. If we were
17657 to encode each conditional variant as a literal string in the opcode
17658 table, it would have approximately 20,000 entries.
17660 Most mnemonics take this affix as a suffix, and in unified syntax,
17661 'most' is upgraded to 'all'. However, in the divided syntax, some
17662 instructions take the affix as an infix, notably the s-variants of
17663 the arithmetic instructions. Of those instructions, all but six
17664 have the infix appear after the third character of the mnemonic.
17666 Accordingly, the algorithm for looking up primary opcodes given
17669 1. Look up the identifier in the opcode table.
17670 If we find a match, go to step U.
17672 2. Look up the last two characters of the identifier in the
17673 conditions table. If we find a match, look up the first N-2
17674 characters of the identifier in the opcode table. If we
17675 find a match, go to step CE.
17677 3. Look up the fourth and fifth characters of the identifier in
17678 the conditions table. If we find a match, extract those
17679 characters from the identifier, and look up the remaining
17680 characters in the opcode table. If we find a match, go
17685 U. Examine the tag field of the opcode structure, in case this is
17686 one of the six instructions with its conditional infix in an
17687 unusual place. If it is, the tag tells us where to find the
17688 infix; look it up in the conditions table and set inst.cond
17689 accordingly. Otherwise, this is an unconditional instruction.
17690 Again set inst.cond accordingly. Return the opcode structure.
17692 CE. Examine the tag field to make sure this is an instruction that
17693 should receive a conditional suffix. If it is not, fail.
17694 Otherwise, set inst.cond from the suffix we already looked up,
17695 and return the opcode structure.
17697 CM. Examine the tag field to make sure this is an instruction that
17698 should receive a conditional infix after the third character.
17699 If it is not, fail. Otherwise, undo the edits to the current
17700 line of input and proceed as for case CE. */
17702 static const struct asm_opcode
*
17703 opcode_lookup (char **str
)
17707 const struct asm_opcode
*opcode
;
17708 const struct asm_cond
*cond
;
17711 /* Scan up to the end of the mnemonic, which must end in white space,
17712 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17713 for (base
= end
= *str
; *end
!= '\0'; end
++)
17714 if (*end
== ' ' || *end
== '.')
17720 /* Handle a possible width suffix and/or Neon type suffix. */
17725 /* The .w and .n suffixes are only valid if the unified syntax is in
17727 if (unified_syntax
&& end
[1] == 'w')
17729 else if (unified_syntax
&& end
[1] == 'n')
17734 inst
.vectype
.elems
= 0;
17736 *str
= end
+ offset
;
17738 if (end
[offset
] == '.')
17740 /* See if we have a Neon type suffix (possible in either unified or
17741 non-unified ARM syntax mode). */
17742 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
17745 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
17751 /* Look for unaffixed or special-case affixed mnemonic. */
17752 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17757 if (opcode
->tag
< OT_odd_infix_0
)
17759 inst
.cond
= COND_ALWAYS
;
17763 if (warn_on_deprecated
&& unified_syntax
)
17764 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17765 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
17766 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17769 inst
.cond
= cond
->value
;
17773 /* Cannot have a conditional suffix on a mnemonic of less than two
17775 if (end
- base
< 3)
17778 /* Look for suffixed mnemonic. */
17780 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17781 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17783 if (opcode
&& cond
)
17786 switch (opcode
->tag
)
17788 case OT_cinfix3_legacy
:
17789 /* Ignore conditional suffixes matched on infix only mnemonics. */
17793 case OT_cinfix3_deprecated
:
17794 case OT_odd_infix_unc
:
17795 if (!unified_syntax
)
17797 /* Fall through. */
17801 case OT_csuf_or_in3
:
17802 inst
.cond
= cond
->value
;
17805 case OT_unconditional
:
17806 case OT_unconditionalF
:
17808 inst
.cond
= cond
->value
;
17811 /* Delayed diagnostic. */
17812 inst
.error
= BAD_COND
;
17813 inst
.cond
= COND_ALWAYS
;
17822 /* Cannot have a usual-position infix on a mnemonic of less than
17823 six characters (five would be a suffix). */
17824 if (end
- base
< 6)
17827 /* Look for infixed mnemonic in the usual position. */
17829 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17833 memcpy (save
, affix
, 2);
17834 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
17835 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17837 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
17838 memcpy (affix
, save
, 2);
17841 && (opcode
->tag
== OT_cinfix3
17842 || opcode
->tag
== OT_cinfix3_deprecated
17843 || opcode
->tag
== OT_csuf_or_in3
17844 || opcode
->tag
== OT_cinfix3_legacy
))
17847 if (warn_on_deprecated
&& unified_syntax
17848 && (opcode
->tag
== OT_cinfix3
17849 || opcode
->tag
== OT_cinfix3_deprecated
))
17850 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17852 inst
.cond
= cond
->value
;
17859 /* This function generates an initial IT instruction, leaving its block
17860 virtually open for the new instructions. Eventually,
17861 the mask will be updated by now_it_add_mask () each time
17862 a new instruction needs to be included in the IT block.
17863 Finally, the block is closed with close_automatic_it_block ().
17864 The block closure can be requested either from md_assemble (),
17865 a tencode (), or due to a label hook. */
17868 new_automatic_it_block (int cond
)
17870 now_it
.state
= AUTOMATIC_IT_BLOCK
;
17871 now_it
.mask
= 0x18;
17873 now_it
.block_length
= 1;
17874 mapping_state (MAP_THUMB
);
17875 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
17876 now_it
.warn_deprecated
= FALSE
;
17877 now_it
.insn_cond
= TRUE
;
17880 /* Close an automatic IT block.
17881 See comments in new_automatic_it_block (). */
17884 close_automatic_it_block (void)
17886 now_it
.mask
= 0x10;
17887 now_it
.block_length
= 0;
17890 /* Update the mask of the current automatically-generated IT
17891 instruction. See comments in new_automatic_it_block (). */
17894 now_it_add_mask (int cond
)
17896 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17897 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17898 | ((bitvalue) << (nbit)))
17899 const int resulting_bit
= (cond
& 1);
17901 now_it
.mask
&= 0xf;
17902 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17904 (5 - now_it
.block_length
));
17905 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17907 ((5 - now_it
.block_length
) - 1) );
17908 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
17911 #undef SET_BIT_VALUE
17914 /* The IT blocks handling machinery is accessed through the these functions:
17915 it_fsm_pre_encode () from md_assemble ()
17916 set_it_insn_type () optional, from the tencode functions
17917 set_it_insn_type_last () ditto
17918 in_it_block () ditto
17919 it_fsm_post_encode () from md_assemble ()
17920 force_automatic_it_block_close () from label habdling functions
17923 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17924 initializing the IT insn type with a generic initial value depending
17925 on the inst.condition.
17926 2) During the tencode function, two things may happen:
17927 a) The tencode function overrides the IT insn type by
17928 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17929 b) The tencode function queries the IT block state by
17930 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17932 Both set_it_insn_type and in_it_block run the internal FSM state
17933 handling function (handle_it_state), because: a) setting the IT insn
17934 type may incur in an invalid state (exiting the function),
17935 and b) querying the state requires the FSM to be updated.
17936 Specifically we want to avoid creating an IT block for conditional
17937 branches, so it_fsm_pre_encode is actually a guess and we can't
17938 determine whether an IT block is required until the tencode () routine
17939 has decided what type of instruction this actually it.
17940 Because of this, if set_it_insn_type and in_it_block have to be used,
17941 set_it_insn_type has to be called first.
17943 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17944 determines the insn IT type depending on the inst.cond code.
17945 When a tencode () routine encodes an instruction that can be
17946 either outside an IT block, or, in the case of being inside, has to be
17947 the last one, set_it_insn_type_last () will determine the proper
17948 IT instruction type based on the inst.cond code. Otherwise,
17949 set_it_insn_type can be called for overriding that logic or
17950 for covering other cases.
17952 Calling handle_it_state () may not transition the IT block state to
17953 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17954 still queried. Instead, if the FSM determines that the state should
17955 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17956 after the tencode () function: that's what it_fsm_post_encode () does.
17958 Since in_it_block () calls the state handling function to get an
17959 updated state, an error may occur (due to invalid insns combination).
17960 In that case, inst.error is set.
17961 Therefore, inst.error has to be checked after the execution of
17962 the tencode () routine.
17964 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17965 any pending state change (if any) that didn't take place in
17966 handle_it_state () as explained above. */
17969 it_fsm_pre_encode (void)
17971 if (inst
.cond
!= COND_ALWAYS
)
17972 inst
.it_insn_type
= INSIDE_IT_INSN
;
17974 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
17976 now_it
.state_handled
= 0;
17979 /* IT state FSM handling function. */
17982 handle_it_state (void)
17984 now_it
.state_handled
= 1;
17985 now_it
.insn_cond
= FALSE
;
17987 switch (now_it
.state
)
17989 case OUTSIDE_IT_BLOCK
:
17990 switch (inst
.it_insn_type
)
17992 case OUTSIDE_IT_INSN
:
17995 case INSIDE_IT_INSN
:
17996 case INSIDE_IT_LAST_INSN
:
17997 if (thumb_mode
== 0)
18000 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
18001 as_tsktsk (_("Warning: conditional outside an IT block"\
18006 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
18007 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
18009 /* Automatically generate the IT instruction. */
18010 new_automatic_it_block (inst
.cond
);
18011 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
18012 close_automatic_it_block ();
18016 inst
.error
= BAD_OUT_IT
;
18022 case IF_INSIDE_IT_LAST_INSN
:
18023 case NEUTRAL_IT_INSN
:
18027 now_it
.state
= MANUAL_IT_BLOCK
;
18028 now_it
.block_length
= 0;
18033 case AUTOMATIC_IT_BLOCK
:
18034 /* Three things may happen now:
18035 a) We should increment current it block size;
18036 b) We should close current it block (closing insn or 4 insns);
18037 c) We should close current it block and start a new one (due
18038 to incompatible conditions or
18039 4 insns-length block reached). */
18041 switch (inst
.it_insn_type
)
18043 case OUTSIDE_IT_INSN
:
18044 /* The closure of the block shall happen immediately,
18045 so any in_it_block () call reports the block as closed. */
18046 force_automatic_it_block_close ();
18049 case INSIDE_IT_INSN
:
18050 case INSIDE_IT_LAST_INSN
:
18051 case IF_INSIDE_IT_LAST_INSN
:
18052 now_it
.block_length
++;
18054 if (now_it
.block_length
> 4
18055 || !now_it_compatible (inst
.cond
))
18057 force_automatic_it_block_close ();
18058 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
18059 new_automatic_it_block (inst
.cond
);
18063 now_it
.insn_cond
= TRUE
;
18064 now_it_add_mask (inst
.cond
);
18067 if (now_it
.state
== AUTOMATIC_IT_BLOCK
18068 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
18069 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
18070 close_automatic_it_block ();
18073 case NEUTRAL_IT_INSN
:
18074 now_it
.block_length
++;
18075 now_it
.insn_cond
= TRUE
;
18077 if (now_it
.block_length
> 4)
18078 force_automatic_it_block_close ();
18080 now_it_add_mask (now_it
.cc
& 1);
18084 close_automatic_it_block ();
18085 now_it
.state
= MANUAL_IT_BLOCK
;
18090 case MANUAL_IT_BLOCK
:
18092 /* Check conditional suffixes. */
18093 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
18096 now_it
.mask
&= 0x1f;
18097 is_last
= (now_it
.mask
== 0x10);
18098 now_it
.insn_cond
= TRUE
;
18100 switch (inst
.it_insn_type
)
18102 case OUTSIDE_IT_INSN
:
18103 inst
.error
= BAD_NOT_IT
;
18106 case INSIDE_IT_INSN
:
18107 if (cond
!= inst
.cond
)
18109 inst
.error
= BAD_IT_COND
;
18114 case INSIDE_IT_LAST_INSN
:
18115 case IF_INSIDE_IT_LAST_INSN
:
18116 if (cond
!= inst
.cond
)
18118 inst
.error
= BAD_IT_COND
;
18123 inst
.error
= BAD_BRANCH
;
18128 case NEUTRAL_IT_INSN
:
18129 /* The BKPT instruction is unconditional even in an IT block. */
18133 inst
.error
= BAD_IT_IT
;
18143 struct depr_insn_mask
18145 unsigned long pattern
;
18146 unsigned long mask
;
18147 const char* description
;
18150 /* List of 16-bit instruction patterns deprecated in an IT block in
18152 static const struct depr_insn_mask depr_it_insns
[] = {
18153 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
18154 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
18155 { 0xa000, 0xb800, N_("ADR") },
18156 { 0x4800, 0xf800, N_("Literal loads") },
18157 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
18158 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
18159 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
18160 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
18161 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
18166 it_fsm_post_encode (void)
18170 if (!now_it
.state_handled
)
18171 handle_it_state ();
18173 if (now_it
.insn_cond
18174 && !now_it
.warn_deprecated
18175 && warn_on_deprecated
18176 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
18178 if (inst
.instruction
>= 0x10000)
18180 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18181 "deprecated in ARMv8"));
18182 now_it
.warn_deprecated
= TRUE
;
18186 const struct depr_insn_mask
*p
= depr_it_insns
;
18188 while (p
->mask
!= 0)
18190 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
18192 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18193 "of the following class are deprecated in ARMv8: "
18194 "%s"), p
->description
);
18195 now_it
.warn_deprecated
= TRUE
;
18203 if (now_it
.block_length
> 1)
18205 as_tsktsk (_("IT blocks containing more than one conditional "
18206 "instruction are deprecated in ARMv8"));
18207 now_it
.warn_deprecated
= TRUE
;
18211 is_last
= (now_it
.mask
== 0x10);
18214 now_it
.state
= OUTSIDE_IT_BLOCK
;
18220 force_automatic_it_block_close (void)
18222 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
18224 close_automatic_it_block ();
18225 now_it
.state
= OUTSIDE_IT_BLOCK
;
18233 if (!now_it
.state_handled
)
18234 handle_it_state ();
18236 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
18239 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18240 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18241 here, hence the "known" in the function name. */
18244 known_t32_only_insn (const struct asm_opcode
*opcode
)
18246 /* Original Thumb-1 wide instruction. */
18247 if (opcode
->tencode
== do_t_blx
18248 || opcode
->tencode
== do_t_branch23
18249 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
18250 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
18253 /* Wide-only instruction added to ARMv8-M Baseline. */
18254 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
18255 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
18256 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
18257 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
18263 /* Whether wide instruction variant can be used if available for a valid OPCODE
18267 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
18269 if (known_t32_only_insn (opcode
))
18272 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18273 of variant T3 of B.W is checked in do_t_branch. */
18274 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
18275 && opcode
->tencode
== do_t_branch
)
18278 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
18279 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
18280 && opcode
->tencode
== do_t_mov_cmp
18281 /* Make sure CMP instruction is not affected. */
18282 && opcode
->aencode
== do_mov
)
18285 /* Wide instruction variants of all instructions with narrow *and* wide
18286 variants become available with ARMv6t2. Other opcodes are either
18287 narrow-only or wide-only and are thus available if OPCODE is valid. */
18288 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
18291 /* OPCODE with narrow only instruction variant or wide variant not
18297 md_assemble (char *str
)
18300 const struct asm_opcode
* opcode
;
18302 /* Align the previous label if needed. */
18303 if (last_label_seen
!= NULL
)
18305 symbol_set_frag (last_label_seen
, frag_now
);
18306 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
18307 S_SET_SEGMENT (last_label_seen
, now_seg
);
18310 memset (&inst
, '\0', sizeof (inst
));
18311 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
18313 opcode
= opcode_lookup (&p
);
18316 /* It wasn't an instruction, but it might be a register alias of
18317 the form alias .req reg, or a Neon .dn/.qn directive. */
18318 if (! create_register_alias (str
, p
)
18319 && ! create_neon_reg_alias (str
, p
))
18320 as_bad (_("bad instruction `%s'"), str
);
18325 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
18326 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
18328 /* The value which unconditional instructions should have in place of the
18329 condition field. */
18330 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
18334 arm_feature_set variant
;
18336 variant
= cpu_variant
;
18337 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
18338 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
18339 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
18340 /* Check that this instruction is supported for this CPU. */
18341 if (!opcode
->tvariant
18342 || (thumb_mode
== 1
18343 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
18345 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
18348 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
18349 && opcode
->tencode
!= do_t_branch
)
18351 as_bad (_("Thumb does not support conditional execution"));
18355 /* Two things are addressed here:
18356 1) Implicit require narrow instructions on Thumb-1.
18357 This avoids relaxation accidentally introducing Thumb-2
18359 2) Reject wide instructions in non Thumb-2 cores.
18361 Only instructions with narrow and wide variants need to be handled
18362 but selecting all non wide-only instructions is easier. */
18363 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
18364 && !t32_insn_ok (variant
, opcode
))
18366 if (inst
.size_req
== 0)
18368 else if (inst
.size_req
== 4)
18370 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
18371 as_bad (_("selected processor does not support 32bit wide "
18372 "variant of instruction `%s'"), str
);
18374 as_bad (_("selected processor does not support `%s' in "
18375 "Thumb-2 mode"), str
);
18380 inst
.instruction
= opcode
->tvalue
;
18382 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
18384 /* Prepare the it_insn_type for those encodings that don't set
18386 it_fsm_pre_encode ();
18388 opcode
->tencode ();
18390 it_fsm_post_encode ();
18393 if (!(inst
.error
|| inst
.relax
))
18395 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
18396 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
18397 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
18399 as_bad (_("cannot honor width suffix -- `%s'"), str
);
18404 /* Something has gone badly wrong if we try to relax a fixed size
18406 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
18408 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18409 *opcode
->tvariant
);
18410 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
18411 set those bits when Thumb-2 32-bit instructions are seen. The impact
18412 of relaxable instructions will be considered later after we finish all
18414 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
18415 variant
= arm_arch_none
;
18417 variant
= cpu_variant
;
18418 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
18419 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18422 check_neon_suffixes
;
18426 mapping_state (MAP_THUMB
);
18429 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
18433 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
18434 is_bx
= (opcode
->aencode
== do_bx
);
18436 /* Check that this instruction is supported for this CPU. */
18437 if (!(is_bx
&& fix_v4bx
)
18438 && !(opcode
->avariant
&&
18439 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
18441 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
18446 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
18450 inst
.instruction
= opcode
->avalue
;
18451 if (opcode
->tag
== OT_unconditionalF
)
18452 inst
.instruction
|= 0xFU
<< 28;
18454 inst
.instruction
|= inst
.cond
<< 28;
18455 inst
.size
= INSN_SIZE
;
18456 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
18458 it_fsm_pre_encode ();
18459 opcode
->aencode ();
18460 it_fsm_post_encode ();
18462 /* Arm mode bx is marked as both v4T and v5 because it's still required
18463 on a hypothetical non-thumb v5 core. */
18465 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
18467 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
18468 *opcode
->avariant
);
18470 check_neon_suffixes
;
18474 mapping_state (MAP_ARM
);
18479 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18487 check_it_blocks_finished (void)
18492 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
18493 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
18494 == MANUAL_IT_BLOCK
)
18496 as_warn (_("section '%s' finished with an open IT block."),
18500 if (now_it
.state
== MANUAL_IT_BLOCK
)
18501 as_warn (_("file finished with an open IT block."));
18505 /* Various frobbings of labels and their addresses. */
18508 arm_start_line_hook (void)
18510 last_label_seen
= NULL
;
18514 arm_frob_label (symbolS
* sym
)
18516 last_label_seen
= sym
;
18518 ARM_SET_THUMB (sym
, thumb_mode
);
18520 #if defined OBJ_COFF || defined OBJ_ELF
18521 ARM_SET_INTERWORK (sym
, support_interwork
);
18524 force_automatic_it_block_close ();
18526 /* Note - do not allow local symbols (.Lxxx) to be labelled
18527 as Thumb functions. This is because these labels, whilst
18528 they exist inside Thumb code, are not the entry points for
18529 possible ARM->Thumb calls. Also, these labels can be used
18530 as part of a computed goto or switch statement. eg gcc
18531 can generate code that looks like this:
18533 ldr r2, [pc, .Laaa]
18543 The first instruction loads the address of the jump table.
18544 The second instruction converts a table index into a byte offset.
18545 The third instruction gets the jump address out of the table.
18546 The fourth instruction performs the jump.
18548 If the address stored at .Laaa is that of a symbol which has the
18549 Thumb_Func bit set, then the linker will arrange for this address
18550 to have the bottom bit set, which in turn would mean that the
18551 address computation performed by the third instruction would end
18552 up with the bottom bit set. Since the ARM is capable of unaligned
18553 word loads, the instruction would then load the incorrect address
18554 out of the jump table, and chaos would ensue. */
18555 if (label_is_thumb_function_name
18556 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
18557 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
18559 /* When the address of a Thumb function is taken the bottom
18560 bit of that address should be set. This will allow
18561 interworking between Arm and Thumb functions to work
18564 THUMB_SET_FUNC (sym
, 1);
18566 label_is_thumb_function_name
= FALSE
;
18569 dwarf2_emit_label (sym
);
18573 arm_data_in_code (void)
18575 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
18577 *input_line_pointer
= '/';
18578 input_line_pointer
+= 5;
18579 *input_line_pointer
= 0;
18587 arm_canonicalize_symbol_name (char * name
)
18591 if (thumb_mode
&& (len
= strlen (name
)) > 5
18592 && streq (name
+ len
- 5, "/data"))
18593 *(name
+ len
- 5) = 0;
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF expands one struct reg_entry initializer: name string,
   encoded number, register type, built-in flag, and a zero neon slot.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* REGNUM glues a prefix and a number into a register name (e.g. r0).  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* REGNUM2 doubles the number in the encoding (Neon Q registers map to
   even D register numbers).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Define registers <p>0 .. <p>15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Define registers <p>16 .. <p>31 (upper half of VFP/Neon banks).  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Define registers <p>0 .. <p>15 with doubled encodings (Neon Q).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Define the banked SP/LR/SPSR triple (upper and lower case) for one
   processor mode, at the given SYSm base encoding.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18629 static const struct reg_entry reg_names
[] =
18631 /* ARM integer registers. */
18632 REGSET(r
, RN
), REGSET(R
, RN
),
18634 /* ATPCS synonyms. */
18635 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
18636 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
18637 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
18639 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
18640 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
18641 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
18643 /* Well-known aliases. */
18644 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
18645 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
18647 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
18648 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
18650 /* Coprocessor numbers. */
18651 REGSET(p
, CP
), REGSET(P
, CP
),
18653 /* Coprocessor register numbers. The "cr" variants are for backward
18655 REGSET(c
, CN
), REGSET(C
, CN
),
18656 REGSET(cr
, CN
), REGSET(CR
, CN
),
18658 /* ARM banked registers. */
18659 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
18660 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
18661 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
18662 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
18663 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
18664 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
18665 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
18667 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
18668 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
18669 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
18670 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
18671 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
18672 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
18673 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
18674 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
18676 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
18677 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
18678 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
18679 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
18680 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
18681 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
18682 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
18683 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18684 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18686 /* FPA registers. */
18687 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
18688 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
18690 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
18691 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
18693 /* VFP SP registers. */
18694 REGSET(s
,VFS
), REGSET(S
,VFS
),
18695 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
18697 /* VFP DP Registers. */
18698 REGSET(d
,VFD
), REGSET(D
,VFD
),
18699 /* Extra Neon DP registers. */
18700 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
18702 /* Neon QP registers. */
18703 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
18705 /* VFP control registers. */
18706 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
18707 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
18708 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
18709 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
18710 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
18711 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
18713 /* Maverick DSP coprocessor registers. */
18714 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
18715 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
18717 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
18718 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
18719 REGDEF(dspsc
,0,DSPSC
),
18721 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
18722 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
18723 REGDEF(DSPSC
,0,DSPSC
),
18725 /* iWMMXt data registers - p0, c0-15. */
18726 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
18728 /* iWMMXt control registers - p1, c0-3. */
18729 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
18730 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
18731 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
18732 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
18734 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18735 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
18736 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
18737 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
18738 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
18740 /* XScale accumulator registers. */
18741 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
18747 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18748 within psr_required_here. */
18749 static const struct asm_psr psrs
[] =
18751 /* Backward compatibility notation. Note that "all" is no longer
18752 truly all possible PSR bits. */
18753 {"all", PSR_c
| PSR_f
},
18757 /* Individual flags. */
18763 /* Combinations of flags. */
18764 {"fs", PSR_f
| PSR_s
},
18765 {"fx", PSR_f
| PSR_x
},
18766 {"fc", PSR_f
| PSR_c
},
18767 {"sf", PSR_s
| PSR_f
},
18768 {"sx", PSR_s
| PSR_x
},
18769 {"sc", PSR_s
| PSR_c
},
18770 {"xf", PSR_x
| PSR_f
},
18771 {"xs", PSR_x
| PSR_s
},
18772 {"xc", PSR_x
| PSR_c
},
18773 {"cf", PSR_c
| PSR_f
},
18774 {"cs", PSR_c
| PSR_s
},
18775 {"cx", PSR_c
| PSR_x
},
18776 {"fsx", PSR_f
| PSR_s
| PSR_x
},
18777 {"fsc", PSR_f
| PSR_s
| PSR_c
},
18778 {"fxs", PSR_f
| PSR_x
| PSR_s
},
18779 {"fxc", PSR_f
| PSR_x
| PSR_c
},
18780 {"fcs", PSR_f
| PSR_c
| PSR_s
},
18781 {"fcx", PSR_f
| PSR_c
| PSR_x
},
18782 {"sfx", PSR_s
| PSR_f
| PSR_x
},
18783 {"sfc", PSR_s
| PSR_f
| PSR_c
},
18784 {"sxf", PSR_s
| PSR_x
| PSR_f
},
18785 {"sxc", PSR_s
| PSR_x
| PSR_c
},
18786 {"scf", PSR_s
| PSR_c
| PSR_f
},
18787 {"scx", PSR_s
| PSR_c
| PSR_x
},
18788 {"xfs", PSR_x
| PSR_f
| PSR_s
},
18789 {"xfc", PSR_x
| PSR_f
| PSR_c
},
18790 {"xsf", PSR_x
| PSR_s
| PSR_f
},
18791 {"xsc", PSR_x
| PSR_s
| PSR_c
},
18792 {"xcf", PSR_x
| PSR_c
| PSR_f
},
18793 {"xcs", PSR_x
| PSR_c
| PSR_s
},
18794 {"cfs", PSR_c
| PSR_f
| PSR_s
},
18795 {"cfx", PSR_c
| PSR_f
| PSR_x
},
18796 {"csf", PSR_c
| PSR_s
| PSR_f
},
18797 {"csx", PSR_c
| PSR_s
| PSR_x
},
18798 {"cxf", PSR_c
| PSR_x
| PSR_f
},
18799 {"cxs", PSR_c
| PSR_x
| PSR_s
},
18800 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
18801 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
18802 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
18803 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
18804 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
18805 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
18806 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
18807 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
18808 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
18809 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
18810 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
18811 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
18812 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
18813 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
18814 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
18815 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
18816 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
18817 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
18818 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
18819 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
18820 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
18821 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
18822 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
18823 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
18826 /* Table of V7M psr names. */
18827 static const struct asm_psr v7m_psrs
[] =
18829 {"apsr", 0x0 }, {"APSR", 0x0 },
18830 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
18831 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
18832 {"psr", 0x3 }, {"PSR", 0x3 },
18833 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
18834 {"ipsr", 0x5 }, {"IPSR", 0x5 },
18835 {"epsr", 0x6 }, {"EPSR", 0x6 },
18836 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
18837 {"msp", 0x8 }, {"MSP", 0x8 },
18838 {"psp", 0x9 }, {"PSP", 0x9 },
18839 {"msplim", 0xa }, {"MSPLIM", 0xa },
18840 {"psplim", 0xb }, {"PSPLIM", 0xb },
18841 {"primask", 0x10}, {"PRIMASK", 0x10},
18842 {"basepri", 0x11}, {"BASEPRI", 0x11},
18843 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
18844 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
18845 {"control", 0x14}, {"CONTROL", 0x14},
18846 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
18847 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
18848 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
18849 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
18850 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
18851 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
18852 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
18853 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
18854 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
18857 /* Table of all shift-in-operand names. */
18858 static const struct asm_shift_name shift_names
[] =
18860 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
18861 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
18862 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
18863 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
18864 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
18865 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
18868 /* Table of all explicit relocation names. */
18870 static struct reloc_entry reloc_names
[] =
18872 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
18873 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
18874 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
18875 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
18876 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
18877 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
18878 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
18879 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
18880 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
18881 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
18882 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
18883 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
18884 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
18885 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
18886 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
18887 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
18888 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
18889 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
}
18893 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
18894 static const struct asm_cond conds
[] =
18898 {"cs", 0x2}, {"hs", 0x2},
18899 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
18913 #define UL_BARRIER(L,U,CODE,FEAT) \
18914 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18915 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
18917 static struct asm_barrier_opt barrier_opt_names
[] =
18919 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
18920 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
18921 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
18922 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
18923 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
18924 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
18925 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
18926 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
18927 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
18928 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
18929 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
18930 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
18931 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
18932 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
18933 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
18934 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()            { OP_stop, }
#define OPS1(a)           { OP_##a, }
#define OPS2(a,b)         { OP_##a,OP_##b, }
#define OPS3(a,b,c)       { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)     { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)   { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)           { a, }
#define OPS_2(a,b)         { a,b, }
#define OPS_3(a,b,c)       { a,b,c, }
#define OPS_4(a,b,c,d)     { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19116 static const struct asm_opcode insns
[] =
19118 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19119 #define THUMB_VARIANT & arm_ext_v4t
19120 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19121 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19122 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19123 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19124 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19125 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19126 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19127 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19128 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19129 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19130 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19131 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19132 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19133 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19134 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19135 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19137 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19138 for setting PSR flag bits. They are obsolete in V6 and do not
19139 have Thumb equivalents. */
19140 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19141 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19142 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
19143 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19144 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19145 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
19146 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19147 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19148 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
19150 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
19151 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
19152 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19153 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19155 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
19156 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19157 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
19159 OP_ADDRGLDR
),ldst
, t_ldst
),
19160 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19162 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19163 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19164 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19165 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19166 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19167 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19169 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19170 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19171 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
19172 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
19175 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
19176 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
19177 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
19178 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
19180 /* Thumb-compatibility pseudo ops. */
19181 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19182 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19183 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19184 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19185 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19186 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19187 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19188 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19189 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
19190 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
19191 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
19192 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
19194 /* These may simplify to neg. */
19195 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19196 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19198 #undef THUMB_VARIANT
19199 #define THUMB_VARIANT & arm_ext_v6
19201 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
19203 /* V1 instructions with no Thumb analogue prior to V6T2. */
19204 #undef THUMB_VARIANT
19205 #define THUMB_VARIANT & arm_ext_v6t2
19207 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19208 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19209 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
19211 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19212 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19213 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
19214 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19216 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19217 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19219 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19220 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19222 /* V1 instructions with no Thumb analogue at all. */
19223 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
19224 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
19226 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19227 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19228 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19229 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19230 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19231 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19232 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19233 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19236 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19237 #undef THUMB_VARIANT
19238 #define THUMB_VARIANT & arm_ext_v4t
19240 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19241 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19243 #undef THUMB_VARIANT
19244 #define THUMB_VARIANT & arm_ext_v6t2
19246 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19247 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
19249 /* Generic coprocessor instructions. */
19250 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19251 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19252 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19253 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19254 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19255 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19256 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19259 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19261 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19262 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19265 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19266 #undef THUMB_VARIANT
19267 #define THUMB_VARIANT & arm_ext_msr
19269 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
19270 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
19273 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19274 #undef THUMB_VARIANT
19275 #define THUMB_VARIANT & arm_ext_v6t2
19277 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19278 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19279 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19280 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19281 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19282 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19283 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19284 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19287 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19288 #undef THUMB_VARIANT
19289 #define THUMB_VARIANT & arm_ext_v4t
19291 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19292 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19293 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19294 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19295 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19296 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19299 #define ARM_VARIANT & arm_ext_v4t_5
19301 /* ARM Architecture 4T. */
19302 /* Note: bx (and blx) are required on V5, even if the processor does
19303 not support Thumb. */
19304 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
19307 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19308 #undef THUMB_VARIANT
19309 #define THUMB_VARIANT & arm_ext_v5t
19311 /* Note: blx has 2 variants; the .value coded here is for
19312 BLX(2). Only this variant has conditional execution. */
19313 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
19314 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
19316 #undef THUMB_VARIANT
19317 #define THUMB_VARIANT & arm_ext_v6t2
19319 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
19320 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19321 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19322 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19323 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19324 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19325 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19326 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19329 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19330 #undef THUMB_VARIANT
19331 #define THUMB_VARIANT & arm_ext_v5exp
19333 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19334 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19335 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19336 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19338 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19339 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19341 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19342 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19343 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19344 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19346 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19347 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19348 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19349 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19351 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19352 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19354 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19355 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19356 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19357 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19360 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19361 #undef THUMB_VARIANT
19362 #define THUMB_VARIANT & arm_ext_v6t2
19364 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
19365 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
19367 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
19368 ADDRGLDRS
), ldrd
, t_ldstd
),
19370 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19371 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19374 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19376 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
19379 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19380 #undef THUMB_VARIANT
19381 #define THUMB_VARIANT & arm_ext_v6
19383 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19384 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19385 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19386 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19387 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19388 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19389 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19390 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19391 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19392 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
19394 #undef THUMB_VARIANT
19395 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19397 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
19398 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19400 #undef THUMB_VARIANT
19401 #define THUMB_VARIANT & arm_ext_v6t2
19403 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19404 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19406 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
19407 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
19409 /* ARM V6 not included in V7M. */
19410 #undef THUMB_VARIANT
19411 #define THUMB_VARIANT & arm_ext_v6_notm
19412 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19413 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19414 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
19415 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
19416 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19417 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19418 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
19419 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19420 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
19421 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19422 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19423 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19424 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19425 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19426 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
19427 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
19428 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19429 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19430 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
19432 /* ARM V6 not included in V7M (eg. integer SIMD). */
19433 #undef THUMB_VARIANT
19434 #define THUMB_VARIANT & arm_ext_v6_dsp
19435 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
19436 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
19437 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19438 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19439 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19440 /* Old name for QASX. */
19441 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19442 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19443 /* Old name for QSAX. */
19444 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19445 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19446 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19447 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19448 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19449 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19450 /* Old name for SASX. */
19451 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19452 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19453 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19454 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19455 /* Old name for SHASX. */
19456 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19457 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19458 /* Old name for SHSAX. */
19459 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19460 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19461 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19462 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19463 /* Old name for SSAX. */
19464 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19465 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19466 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19467 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19468 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19469 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19470 /* Old name for UASX. */
19471 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19472 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19473 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19474 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19475 /* Old name for UHASX. */
19476 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19477 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19478 /* Old name for UHSAX. */
19479 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19480 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19481 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19482 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19483 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19484 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19485 /* Old name for UQASX. */
19486 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19487 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19488 /* Old name for UQSAX. */
19489 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19490 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19491 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19492 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19493 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19494 /* Old name for USAX. */
19495 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19496 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19497 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19498 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19499 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19500 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19501 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19502 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19503 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19504 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19505 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19506 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19507 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19508 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19509 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19510 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19511 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19512 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19513 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19514 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19515 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19516 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19517 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19518 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19519 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19520 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19521 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19522 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19523 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19524 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
19525 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
19526 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19527 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19528 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
19531 #define ARM_VARIANT & arm_ext_v6k
19532 #undef THUMB_VARIANT
19533 #define THUMB_VARIANT & arm_ext_v6k
19535 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
19536 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
19537 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
19538 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
19540 #undef THUMB_VARIANT
19541 #define THUMB_VARIANT & arm_ext_v6_notm
19542 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
19544 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
19545 RRnpcb
), strexd
, t_strexd
),
19547 #undef THUMB_VARIANT
19548 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19549 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
19551 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
19553 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19555 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19557 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
19560 #define ARM_VARIANT & arm_ext_sec
19561 #undef THUMB_VARIANT
19562 #define THUMB_VARIANT & arm_ext_sec
19564 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
19567 #define ARM_VARIANT & arm_ext_virt
19568 #undef THUMB_VARIANT
19569 #define THUMB_VARIANT & arm_ext_virt
19571 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
19572 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
19575 #define ARM_VARIANT & arm_ext_pan
19576 #undef THUMB_VARIANT
19577 #define THUMB_VARIANT & arm_ext_pan
19579 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
19582 #define ARM_VARIANT & arm_ext_v6t2
19583 #undef THUMB_VARIANT
19584 #define THUMB_VARIANT & arm_ext_v6t2
19586 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
19587 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
19588 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19589 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19591 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19592 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
19594 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19595 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19596 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19597 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19599 #undef THUMB_VARIANT
19600 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19601 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19602 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19604 /* Thumb-only instructions. */
19606 #define ARM_VARIANT NULL
19607 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
19608 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
19610 /* ARM does not really have an IT instruction, so always allow it.
19611 The opcode is copied from Thumb in order to allow warnings in
19612 -mimplicit-it=[never | arm] modes. */
19614 #define ARM_VARIANT & arm_ext_v1
19615 #undef THUMB_VARIANT
19616 #define THUMB_VARIANT & arm_ext_v6t2
19618 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
19619 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
19620 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
19621 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
19622 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
19623 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
19624 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
19625 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
19626 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
19627 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
19628 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
19629 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
19630 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
19631 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
19632 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
19633 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19634 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19635 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19637 /* Thumb2 only instructions. */
19639 #define ARM_VARIANT NULL
19641 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19642 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19643 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19644 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19645 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
19646 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
19648 /* Hardware division instructions. */
19650 #define ARM_VARIANT & arm_ext_adiv
19651 #undef THUMB_VARIANT
19652 #define THUMB_VARIANT & arm_ext_div
19654 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19655 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19657 /* ARM V6M/V7 instructions. */
19659 #define ARM_VARIANT & arm_ext_barrier
19660 #undef THUMB_VARIANT
19661 #define THUMB_VARIANT & arm_ext_barrier
19663 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
19664 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
19665 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
19667 /* ARM V7 instructions. */
19669 #define ARM_VARIANT & arm_ext_v7
19670 #undef THUMB_VARIANT
19671 #define THUMB_VARIANT & arm_ext_v7
19673 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
19674 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
19677 #define ARM_VARIANT & arm_ext_mp
19678 #undef THUMB_VARIANT
19679 #define THUMB_VARIANT & arm_ext_mp
19681 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
19683 /* AArchv8 instructions. */
19685 #define ARM_VARIANT & arm_ext_v8
19687 /* Instructions shared between armv8-a and armv8-m. */
19688 #undef THUMB_VARIANT
19689 #define THUMB_VARIANT & arm_ext_atomics
19691 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19692 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19693 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19694 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19695 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19696 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19697 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19698 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
19699 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19700 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19702 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19704 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19706 #undef THUMB_VARIANT
19707 #define THUMB_VARIANT & arm_ext_v8
19709 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
19710 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
19711 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
19713 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
19715 /* ARMv8 T32 only. */
19717 #define ARM_VARIANT NULL
19718 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
19719 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
19720 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
19722 /* FP for ARMv8. */
19724 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19725 #undef THUMB_VARIANT
19726 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19728 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19729 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19730 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19731 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19732 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19733 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19734 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
19735 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
19736 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
19737 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
19738 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
19739 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
19740 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
19741 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
19742 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
19743 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
19744 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
19746 /* Crypto v1 extensions. */
19748 #define ARM_VARIANT & fpu_crypto_ext_armv8
19749 #undef THUMB_VARIANT
19750 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19752 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
19753 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
19754 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
19755 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
19756 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
19757 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
19758 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
19759 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
19760 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
19761 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
19762 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
19763 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
19764 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
19765 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
19768 #define ARM_VARIANT & crc_ext_armv8
19769 #undef THUMB_VARIANT
19770 #define THUMB_VARIANT & crc_ext_armv8
19771 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
19772 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
19773 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
19774 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
19775 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
19776 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
19778 /* ARMv8.2 RAS extension. */
19780 #define ARM_VARIANT & arm_ext_ras
19781 #undef THUMB_VARIANT
19782 #define THUMB_VARIANT & arm_ext_ras
19783 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
19786 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19787 #undef THUMB_VARIANT
19788 #define THUMB_VARIANT NULL
19790 cCE("wfs", e200110
, 1, (RR
), rd
),
19791 cCE("rfs", e300110
, 1, (RR
), rd
),
19792 cCE("wfc", e400110
, 1, (RR
), rd
),
19793 cCE("rfc", e500110
, 1, (RR
), rd
),
19795 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19796 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19797 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19798 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19800 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19801 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19802 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19803 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19805 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
19806 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
19807 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
19808 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
19809 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
19810 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
19811 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
19812 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
19813 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
19814 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
19815 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
19816 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
19818 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
19819 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
19820 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
19821 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
19822 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
19823 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
19824 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
19825 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
19826 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
19827 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
19828 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
19829 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
19831 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
19832 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
19833 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
19834 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
19835 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
19836 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
19837 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
19838 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
19839 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
19840 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
19841 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
19842 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
19844 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
19845 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
19846 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
19847 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
19848 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
19849 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
19850 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
19851 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
19852 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
19853 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
19854 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
19855 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
19857 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
19858 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
19859 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
19860 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
19861 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
19862 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
19863 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
19864 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
19865 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
19866 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
19867 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
19868 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
19870 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
19871 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
19872 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
19873 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
19874 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
19875 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
19876 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
19877 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
19878 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
19879 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
19880 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
19881 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
19883 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
19884 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
19885 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
19886 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
19887 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
19888 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
19889 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
19890 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
19891 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
19892 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
19893 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
19894 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
19896 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
19897 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
19898 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
19899 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
19900 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
19901 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
19902 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
19903 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
19904 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
19905 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
19906 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
19907 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
19909 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
19910 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
19911 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
19912 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
19913 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
19914 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
19915 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
19916 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
19917 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
19918 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
19919 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
19920 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
19922 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
19923 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
19924 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
19925 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
19926 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
19927 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
19928 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
19929 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
19930 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
19931 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
19932 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
19933 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
19935 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
19936 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
19937 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
19938 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
19939 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
19940 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
19941 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
19942 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
19943 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
19944 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
19945 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
19946 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
19948 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
19949 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
19950 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
19951 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
19952 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
19953 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
19954 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
19955 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
19956 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
19957 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
19958 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
19959 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
19961 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
19962 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
19963 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
19964 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
19965 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
19966 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
19967 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
19968 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
19969 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
19970 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
19971 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
19972 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
19974 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
19975 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
19976 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
19977 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
19978 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
19979 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
19980 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
19981 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
19982 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
19983 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
19984 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
19985 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
19987 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
19988 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
19989 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
19990 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
19991 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
19992 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
19993 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
19994 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
19995 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
19996 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
19997 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
19998 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
20000 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
20001 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
20002 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
20003 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
20004 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
20005 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
20006 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
20007 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
20008 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
20009 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
20010 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
20011 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
20013 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20014 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20015 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20016 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20017 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20018 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20019 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20020 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20021 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20022 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20023 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20024 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20026 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20027 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20028 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20029 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20030 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20031 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20032 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20033 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20034 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20035 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20036 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20037 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20039 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20040 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20041 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20042 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20043 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20044 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20045 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20046 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20047 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20048 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20049 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20050 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20052 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20053 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20054 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20055 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20056 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20057 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20058 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20059 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20060 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20061 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20062 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20063 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20065 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20066 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20067 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20068 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20069 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20070 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20071 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20072 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20073 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20074 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20075 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20076 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20078 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20079 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20080 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20081 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20082 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20083 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20084 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20085 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20086 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20087 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20088 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20089 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20091 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20092 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20093 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20094 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20095 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20096 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20097 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20098 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20099 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20100 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20101 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20102 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20104 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20105 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20106 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20107 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20108 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20109 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20110 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20111 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20112 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20113 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20114 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20115 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20117 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20118 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20119 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20120 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20121 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20122 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20123 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20124 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20125 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20126 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20127 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20128 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20130 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20131 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20132 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20133 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20134 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20135 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20136 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20137 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20138 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20139 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20140 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20141 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20143 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20144 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20145 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20146 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20147 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20148 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20149 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20150 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20151 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20152 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20153 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20154 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20156 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20157 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20158 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20159 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20160 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20161 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20162 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20163 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20164 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20165 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20166 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20167 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20169 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20170 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20171 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20172 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20173 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20174 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20175 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20176 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20177 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20178 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20179 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20180 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20182 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20183 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20184 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20185 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20187 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
20188 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
20189 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
20190 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
20191 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
20192 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
20193 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
20194 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
20195 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
20196 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
20197 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
20198 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
20200 /* The implementation of the FIX instruction is broken on some
20201 assemblers, in that it accepts a precision specifier as well as a
20202 rounding specifier, despite the fact that this is meaningless.
20203 To be more compatible, we accept it as well, though of course it
20204 does not set any bits. */
20205 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
20206 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
20207 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
20208 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
20209 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
20210 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
20211 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
20212 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
20213 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
20214 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
20215 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
20216 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
20217 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
20219 /* Instructions that were new with the real FPA, call them V2. */
20221 #define ARM_VARIANT & fpu_fpa_ext_v2
20223 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20224 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20225 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20226 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20227 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20228 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20231 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20233 /* Moves and type conversions. */
20234 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20235 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
20236 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
20237 cCE("fmstat", ef1fa10
, 0, (), noargs
),
20238 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
20239 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
20240 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20241 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20242 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20243 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20244 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20245 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20246 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
20247 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
20249 /* Memory operations. */
20250 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20251 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20252 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20253 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20254 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20255 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20256 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20257 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20258 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20259 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20260 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20261 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20262 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20263 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20264 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20265 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20266 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20267 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20269 /* Monadic operations. */
20270 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20271 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20272 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20274 /* Dyadic operations. */
20275 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20276 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20277 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20278 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20279 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20280 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20281 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20282 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20283 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20286 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20287 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
20288 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20289 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
20291 /* Double precision load/store are still present on single precision
20292 implementations. */
20293 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20294 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20295 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20296 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20297 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20298 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20299 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20300 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20301 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20302 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20305 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20307 /* Moves and type conversions. */
20308 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20309 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20310 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20311 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20312 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20313 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20314 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20315 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20316 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20317 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20318 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20319 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20320 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20322 /* Monadic operations. */
20323 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20324 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20325 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20327 /* Dyadic operations. */
20328 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20329 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20330 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20331 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20332 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20333 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20334 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20335 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20336 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20339 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20340 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
20341 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20342 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
20345 #define ARM_VARIANT & fpu_vfp_ext_v2
20347 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
20348 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
20349 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
20350 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
20352 /* Instructions which may belong to either the Neon or VFP instruction sets.
20353 Individual encoder functions perform additional architecture checks. */
20355 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20356 #undef THUMB_VARIANT
20357 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20359 /* These mnemonics are unique to VFP. */
20360 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
20361 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
20362 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20363 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20364 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20365 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20366 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20367 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
20368 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
20369 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
20371 /* Mnemonics shared by Neon and VFP. */
20372 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
20373 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20374 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20376 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20377 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20379 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20380 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20382 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20383 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20384 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20385 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20386 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20387 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20388 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20389 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20391 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
20392 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
20393 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
20394 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
20397 /* NOTE: All VMOV encoding is special-cased! */
20398 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
20399 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
20402 #define ARM_VARIANT & arm_ext_fp16
20403 #undef THUMB_VARIANT
20404 #define THUMB_VARIANT & arm_ext_fp16
20405 /* New instructions added from v8.2, allowing the extraction and insertion of
20406 the upper 16 bits of a 32-bit vector register. */
20407 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
20408 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
20410 #undef THUMB_VARIANT
20411 #define THUMB_VARIANT & fpu_neon_ext_v1
20413 #define ARM_VARIANT & fpu_neon_ext_v1
20415 /* Data processing with three registers of the same length. */
20416 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20417 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
20418 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
20419 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20420 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20421 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20422 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20423 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20424 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20425 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20426 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20427 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20428 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20429 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20430 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20431 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20432 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20433 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20434 /* If not immediate, fall back to neon_dyadic_i64_su.
20435 shl_imm should accept I8 I16 I32 I64,
20436 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20437 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
20438 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
20439 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
20440 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
20441 /* Logic ops, types optional & ignored. */
20442 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20443 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20444 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20445 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20446 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20447 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20448 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20449 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20450 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
20451 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
20452 /* Bitfield ops, untyped. */
20453 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20454 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20455 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20456 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20457 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20458 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20459 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20460 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20461 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20462 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20463 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20464 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20465 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20466 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20467 back to neon_dyadic_if_su. */
20468 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20469 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20470 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20471 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20472 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20473 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20474 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20475 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20476 /* Comparison. Type I8 I16 I32 F32. */
20477 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
20478 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
20479 /* As above, D registers only. */
20480 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20481 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20482 /* Int and float variants, signedness unimportant. */
20483 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20484 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20485 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
20486 /* Add/sub take types I8 I16 I32 I64 F32. */
20487 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20488 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20489 /* vtst takes sizes 8, 16, 32. */
20490 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
20491 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
20492 /* VMUL takes I8 I16 I32 F32 P8. */
20493 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
20494 /* VQD{R}MULH takes S16 S32. */
20495 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20496 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20497 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20498 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20499 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20500 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20501 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20502 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20503 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20504 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20505 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20506 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20507 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20508 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20509 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20510 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20511 /* ARM v8.1 extension. */
20512 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
20513 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
20514 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
20515 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
20517 /* Two address, int/float. Types S8 S16 S32 F32. */
20518 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20519 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20521 /* Data processing with two registers and a shift amount. */
20522 /* Right shifts, and variants with rounding.
20523 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20524 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20525 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20526 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20527 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20528 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20529 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20530 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20531 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20532 /* Shift and insert. Sizes accepted 8 16 32 64. */
20533 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
20534 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
20535 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
20536 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
20537 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20538 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
20539 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
20540 /* Right shift immediate, saturating & narrowing, with rounding variants.
20541 Types accepted S16 S32 S64 U16 U32 U64. */
20542 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20543 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20544 /* As above, unsigned. Types accepted S16 S32 S64. */
20545 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20546 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20547 /* Right shift narrowing. Types accepted I16 I32 I64. */
20548 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20549 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20550 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20551 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
20552 /* CVT with optional immediate for fixed-point variant. */
20553 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
20555 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
20556 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
20558 /* Data processing, three registers of different lengths. */
20559 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20560 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
20561 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20562 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20563 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20564 /* If not scalar, fall back to neon_dyadic_long.
20565 Vector types as above, scalar types S16 S32 U16 U32. */
20566 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20567 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20568 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20569 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20570 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20571 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20572 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20573 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20574 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20575 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20576 /* Saturating doubling multiplies. Types S16 S32. */
20577 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20578 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20579 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20580 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20581 S16 S32 U16 U32. */
20582 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
20584 /* Extract. Size 8. */
20585 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
20586 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
20588 /* Two registers, miscellaneous. */
20589 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20590 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
20591 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
20592 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
20593 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
20594 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
20595 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
20596 /* Vector replicate. Sizes 8 16 32. */
20597 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
20598 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
20599 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20600 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
20601 /* VMOVN. Types I16 I32 I64. */
20602 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
20603 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20604 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
20605 /* VQMOVUN. Types S16 S32 S64. */
20606 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
20607 /* VZIP / VUZP. Sizes 8 16 32. */
20608 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20609 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20610 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20611 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20612 /* VQABS / VQNEG. Types S8 S16 S32. */
20613 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20614 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20615 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20616 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20617 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20618 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20619 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
20620 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20621 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
20622 /* Reciprocal estimates. Types U32 F16 F32. */
20623 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20624 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
20625 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20626 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
20627 /* VCLS. Types S8 S16 S32. */
20628 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
20629 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
20630 /* VCLZ. Types I8 I16 I32. */
20631 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
20632 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
20633 /* VCNT. Size 8. */
20634 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
20635 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
20636 /* Two address, untyped. */
20637 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
20638 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
20639 /* VTRN. Sizes 8 16 32. */
20640 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
20641 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
20643 /* Table lookup. Size 8. */
20644 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20645 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20647 #undef THUMB_VARIANT
20648 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20650 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20652 /* Neon element/structure load/store. */
20653 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20654 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20655 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20656 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20657 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20658 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20659 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20660 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20662 #undef THUMB_VARIANT
20663 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20665 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20666 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
20667 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20668 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20669 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20670 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20671 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20672 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20673 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20674 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20676 #undef THUMB_VARIANT
20677 #define THUMB_VARIANT & fpu_vfp_ext_v3
20679 #define ARM_VARIANT & fpu_vfp_ext_v3
20681 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
20682 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20683 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20684 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20685 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20686 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20687 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20688 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20689 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20692 #define ARM_VARIANT & fpu_vfp_ext_fma
20693 #undef THUMB_VARIANT
20694 #define THUMB_VARIANT & fpu_vfp_ext_fma
20695 /* Mnemonics shared by Neon and VFP. These are included in the
20696 VFP FMA variant; NEON and VFP FMA always includes the NEON
20697 FMA instructions. */
20698 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20699 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20700 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20701 the v form should always be used. */
20702 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20703 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20704 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20705 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20706 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20707 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20709 #undef THUMB_VARIANT
20711 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20713 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20714 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20715 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20716 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20717 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20718 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20719 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
20720 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
20723 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20725 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
20726 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
20727 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
20728 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
20729 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
20730 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
20731 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
20732 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
20733 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
20734 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20735 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20736 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20737 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20738 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20739 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20740 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20741 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20742 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20743 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
20744 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
20745 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20746 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20747 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20748 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20749 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20750 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20751 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
20752 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
20753 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
20754 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
20755 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
20756 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
20757 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
20758 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
20759 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20760 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20761 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20762 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20763 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20764 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20765 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20766 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20767 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20768 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20769 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20770 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20771 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
20772 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20773 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20774 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20775 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20776 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20777 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20778 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20779 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20780 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20781 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20782 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20783 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20784 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20785 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20786 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20787 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20788 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20789 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20790 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20791 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20792 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20793 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20794 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20795 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20796 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20797 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20798 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20799 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20800 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20801 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20802 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20803 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20804 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20805 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20806 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20807 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20808 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20809 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20810 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20811 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20812 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20813 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
20814 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20815 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20816 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20817 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20818 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20819 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20820 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20821 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20822 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20823 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20824 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20825 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20826 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20827 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20828 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20829 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20830 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20831 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20832 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20833 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20834 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20835 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
20836 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20837 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20838 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20839 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20840 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20841 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20842 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20843 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20844 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20845 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20846 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20847 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20848 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20849 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20850 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20851 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20852 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20853 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20854 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20855 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20856 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20857 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20858 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20859 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20860 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20861 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20862 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20863 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20864 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20865 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20866 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20867 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20868 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20869 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20870 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20871 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20872 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20873 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20874 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20875 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20876 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20877 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20878 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20879 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20880 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20881 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20882 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20883 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20884 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20885 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20886 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
20889 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20891 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
20892 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
20893 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
20894 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20895 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20896 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20897 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20898 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20899 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20900 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20901 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20902 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20903 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20904 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20905 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20906 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20907 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20908 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20909 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20910 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20911 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
20912 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20913 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20914 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20915 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20916 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20917 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20918 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20919 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20920 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20921 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20922 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20923 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20924 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20925 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20926 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20927 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20928 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20929 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20930 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20931 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20932 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20933 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20934 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20935 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20936 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20937 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20938 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20939 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20940 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20941 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20942 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20943 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20944 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20945 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20946 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20947 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20950 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20952 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20953 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20954 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20955 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20956 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20957 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20958 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20959 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20960 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
20961 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
20962 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
20963 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
20964 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
20965 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
20966 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
20967 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
20968 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
20969 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
20970 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
20971 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
20972 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
20973 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
20974 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
20975 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
20976 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
20977 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
20978 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
20979 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
20980 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
20981 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
20982 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
20983 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
20984 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
20985 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
20986 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
20987 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
20988 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
20989 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
20990 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
20991 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
20992 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
20993 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
20994 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
20995 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
20996 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
20997 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
20998 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
20999 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
21000 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
21001 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
21002 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
21003 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
21004 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
21005 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
21006 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21007 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21008 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21009 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21010 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21011 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21012 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
21013 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
21014 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
21015 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
21016 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21017 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21018 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21019 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21020 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21021 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21022 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21023 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21024 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
21025 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
21026 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
21027 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
21029 /* ARMv8-M instructions. */
21031 #define ARM_VARIANT NULL
21032 #undef THUMB_VARIANT
21033 #define THUMB_VARIANT & arm_ext_v8m
21034 TUE("sg", 0, e97fe97f
, 0, (), 0, noargs
),
21035 TUE("blxns", 0, 4784, 1, (RRnpc
), 0, t_blx
),
21036 TUE("bxns", 0, 4704, 1, (RRnpc
), 0, t_bx
),
21037 TUE("tt", 0, e840f000
, 2, (RRnpc
, RRnpc
), 0, tt
),
21038 TUE("ttt", 0, e840f040
, 2, (RRnpc
, RRnpc
), 0, tt
),
21039 TUE("tta", 0, e840f080
, 2, (RRnpc
, RRnpc
), 0, tt
),
21040 TUE("ttat", 0, e840f0c0
, 2, (RRnpc
, RRnpc
), 0, tt
),
21042 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21043 instructions behave as nop if no VFP is present. */
21044 #undef THUMB_VARIANT
21045 #define THUMB_VARIANT & arm_ext_v8m_main
21046 TUEc("vlldm", 0, ec300a00
, 1, (RRnpc
), rn
),
21047 TUEc("vlstm", 0, ec200a00
, 1, (RRnpc
), rn
),
21050 #undef THUMB_VARIANT
21076 /* MD interface: bits in the object file. */
21078 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21079 for use in the a.out file, and stores them in the array pointed to by buf.
21080 This knows about the endian-ness of the target machine and does
21081 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21082 2 (short) and 4 (long) Floating numbers are put out as a series of
21083 LITTLENUMS (shorts, here at least). */
21086 md_number_to_chars (char * buf
, valueT val
, int n
)
21088 if (target_big_endian
)
21089 number_to_chars_bigendian (buf
, val
, n
);
21091 number_to_chars_littleendian (buf
, val
, n
);
21095 md_chars_to_number (char * buf
, int n
)
21098 unsigned char * where
= (unsigned char *) buf
;
21100 if (target_big_endian
)
21105 result
|= (*where
++ & 255);
21113 result
|= (where
[n
] & 255);
21120 /* MD interface: Sections. */
21122 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21123 that an rs_machine_dependent frag may reach. */
21126 arm_frag_max_var (fragS
*fragp
)
21128 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21129 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21131 Note that we generate relaxable instructions even for cases that don't
21132 really need it, like an immediate that's a trivial constant. So we're
21133 overestimating the instruction size for some of those cases. Rather
21134 than putting more intelligence here, it would probably be better to
21135 avoid generating a relaxation frag in the first place when it can be
21136 determined up front that a short instruction will suffice. */
21138 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
21142 /* Estimate the size of a frag before relaxing. Assume everything fits in
21146 md_estimate_size_before_relax (fragS
* fragp
,
21147 segT segtype ATTRIBUTE_UNUSED
)
21153 /* Convert a machine dependent frag. */
21156 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
21158 unsigned long insn
;
21159 unsigned long old_op
;
21167 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21169 old_op
= bfd_get_16(abfd
, buf
);
21170 if (fragp
->fr_symbol
)
21172 exp
.X_op
= O_symbol
;
21173 exp
.X_add_symbol
= fragp
->fr_symbol
;
21177 exp
.X_op
= O_constant
;
21179 exp
.X_add_number
= fragp
->fr_offset
;
21180 opcode
= fragp
->fr_subtype
;
21183 case T_MNEM_ldr_pc
:
21184 case T_MNEM_ldr_pc2
:
21185 case T_MNEM_ldr_sp
:
21186 case T_MNEM_str_sp
:
21193 if (fragp
->fr_var
== 4)
21195 insn
= THUMB_OP32 (opcode
);
21196 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
21198 insn
|= (old_op
& 0x700) << 4;
21202 insn
|= (old_op
& 7) << 12;
21203 insn
|= (old_op
& 0x38) << 13;
21205 insn
|= 0x00000c00;
21206 put_thumb32_insn (buf
, insn
);
21207 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
21211 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
21213 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
21216 if (fragp
->fr_var
== 4)
21218 insn
= THUMB_OP32 (opcode
);
21219 insn
|= (old_op
& 0xf0) << 4;
21220 put_thumb32_insn (buf
, insn
);
21221 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
21225 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21226 exp
.X_add_number
-= 4;
21234 if (fragp
->fr_var
== 4)
21236 int r0off
= (opcode
== T_MNEM_mov
21237 || opcode
== T_MNEM_movs
) ? 0 : 8;
21238 insn
= THUMB_OP32 (opcode
);
21239 insn
= (insn
& 0xe1ffffff) | 0x10000000;
21240 insn
|= (old_op
& 0x700) << r0off
;
21241 put_thumb32_insn (buf
, insn
);
21242 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21246 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
21251 if (fragp
->fr_var
== 4)
21253 insn
= THUMB_OP32(opcode
);
21254 put_thumb32_insn (buf
, insn
);
21255 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
21258 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
21262 if (fragp
->fr_var
== 4)
21264 insn
= THUMB_OP32(opcode
);
21265 insn
|= (old_op
& 0xf00) << 14;
21266 put_thumb32_insn (buf
, insn
);
21267 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
21270 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
21273 case T_MNEM_add_sp
:
21274 case T_MNEM_add_pc
:
21275 case T_MNEM_inc_sp
:
21276 case T_MNEM_dec_sp
:
21277 if (fragp
->fr_var
== 4)
21279 /* ??? Choose between add and addw. */
21280 insn
= THUMB_OP32 (opcode
);
21281 insn
|= (old_op
& 0xf0) << 4;
21282 put_thumb32_insn (buf
, insn
);
21283 if (opcode
== T_MNEM_add_pc
)
21284 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
21286 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21289 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21297 if (fragp
->fr_var
== 4)
21299 insn
= THUMB_OP32 (opcode
);
21300 insn
|= (old_op
& 0xf0) << 4;
21301 insn
|= (old_op
& 0xf) << 16;
21302 put_thumb32_insn (buf
, insn
);
21303 if (insn
& (1 << 20))
21304 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21306 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21309 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21315 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
21316 (enum bfd_reloc_code_real
) reloc_type
);
21317 fixp
->fx_file
= fragp
->fr_file
;
21318 fixp
->fx_line
= fragp
->fr_line
;
21319 fragp
->fr_fix
+= fragp
->fr_var
;
21321 /* Set whether we use thumb-2 ISA based on final relaxation results. */
21322 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
21323 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
21324 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
21327 /* Return the size of a relaxable immediate operand instruction.
21328 SHIFT and SIZE specify the form of the allowable immediate. */
21330 relax_immediate (fragS
*fragp
, int size
, int shift
)
21336 /* ??? Should be able to do better than this. */
21337 if (fragp
->fr_symbol
)
21340 low
= (1 << shift
) - 1;
21341 mask
= (1 << (shift
+ size
)) - (1 << shift
);
21342 offset
= fragp
->fr_offset
;
21343 /* Force misaligned offsets to 32-bit variant. */
21346 if (offset
& ~mask
)
21351 /* Get the address of a symbol during relaxation. */
21353 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
21359 sym
= fragp
->fr_symbol
;
21360 sym_frag
= symbol_get_frag (sym
);
21361 know (S_GET_SEGMENT (sym
) != absolute_section
21362 || sym_frag
== &zero_address_frag
);
21363 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
21365 /* If frag has yet to be reached on this pass, assume it will
21366 move by STRETCH just as we did. If this is not so, it will
21367 be because some frag between grows, and that will force
21371 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
21375 /* Adjust stretch for any alignment frag. Note that if have
21376 been expanding the earlier code, the symbol may be
21377 defined in what appears to be an earlier frag. FIXME:
21378 This doesn't handle the fr_subtype field, which specifies
21379 a maximum number of bytes to skip when doing an
21381 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
21383 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
21386 stretch
= - ((- stretch
)
21387 & ~ ((1 << (int) f
->fr_offset
) - 1));
21389 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
21401 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21404 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
21409 /* Assume worst case for symbols not known to be in the same section. */
21410 if (fragp
->fr_symbol
== NULL
21411 || !S_IS_DEFINED (fragp
->fr_symbol
)
21412 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21413 || S_IS_WEAK (fragp
->fr_symbol
))
21416 val
= relaxed_symbol_addr (fragp
, stretch
);
21417 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
21418 addr
= (addr
+ 4) & ~3;
21419 /* Force misaligned targets to 32-bit variant. */
21423 if (val
< 0 || val
> 1020)
21428 /* Return the size of a relaxable add/sub immediate instruction. */
21430 relax_addsub (fragS
*fragp
, asection
*sec
)
21435 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21436 op
= bfd_get_16(sec
->owner
, buf
);
21437 if ((op
& 0xf) == ((op
>> 4) & 0xf))
21438 return relax_immediate (fragp
, 8, 0);
21440 return relax_immediate (fragp
, 3, 0);
21443 /* Return TRUE iff the definition of symbol S could be pre-empted
21444 (overridden) at link or load time. */
21446 symbol_preemptible (symbolS
*s
)
21448 /* Weak symbols can always be pre-empted. */
21452 /* Non-global symbols cannot be pre-empted. */
21453 if (! S_IS_EXTERNAL (s
))
21457 /* In ELF, a global symbol can be marked protected, or private. In that
21458 case it can't be pre-empted (other definitions in the same link unit
21459 would violate the ODR). */
21460 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
21464 /* Other global symbols might be pre-empted. */
21468 /* Return the size of a relaxable branch instruction. BITS is the
21469 size of the offset field in the narrow instruction. */
21472 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
21478 /* Assume worst case for symbols not known to be in the same section. */
21479 if (!S_IS_DEFINED (fragp
->fr_symbol
)
21480 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21481 || S_IS_WEAK (fragp
->fr_symbol
))
21485 /* A branch to a function in ARM state will require interworking. */
21486 if (S_IS_DEFINED (fragp
->fr_symbol
)
21487 && ARM_IS_FUNC (fragp
->fr_symbol
))
21491 if (symbol_preemptible (fragp
->fr_symbol
))
21494 val
= relaxed_symbol_addr (fragp
, stretch
);
21495 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
21498 /* Offset is a signed value *2 */
21500 if (val
>= limit
|| val
< -limit
)
21506 /* Relax a machine dependent frag. This returns the amount by which
21507 the current size of the frag should change. */
21510 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
21515 oldsize
= fragp
->fr_var
;
21516 switch (fragp
->fr_subtype
)
21518 case T_MNEM_ldr_pc2
:
21519 newsize
= relax_adr (fragp
, sec
, stretch
);
21521 case T_MNEM_ldr_pc
:
21522 case T_MNEM_ldr_sp
:
21523 case T_MNEM_str_sp
:
21524 newsize
= relax_immediate (fragp
, 8, 2);
21528 newsize
= relax_immediate (fragp
, 5, 2);
21532 newsize
= relax_immediate (fragp
, 5, 1);
21536 newsize
= relax_immediate (fragp
, 5, 0);
21539 newsize
= relax_adr (fragp
, sec
, stretch
);
21545 newsize
= relax_immediate (fragp
, 8, 0);
21548 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
21551 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
21553 case T_MNEM_add_sp
:
21554 case T_MNEM_add_pc
:
21555 newsize
= relax_immediate (fragp
, 8, 2);
21557 case T_MNEM_inc_sp
:
21558 case T_MNEM_dec_sp
:
21559 newsize
= relax_immediate (fragp
, 7, 2);
21565 newsize
= relax_addsub (fragp
, sec
);
21571 fragp
->fr_var
= newsize
;
21572 /* Freeze wide instructions that are at or before the same location as
21573 in the previous pass. This avoids infinite loops.
21574 Don't freeze them unconditionally because targets may be artificially
21575 misaligned by the expansion of preceding frags. */
21576 if (stretch
<= 0 && newsize
> 2)
21578 md_convert_frag (sec
->owner
, sec
, fragp
);
21582 return newsize
- oldsize
;
21585 /* Round up a section size to the appropriate boundary. */
21588 md_section_align (segT segment ATTRIBUTE_UNUSED
,
21591 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21592 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
21594 /* For a.out, force the section size to be aligned. If we don't do
21595 this, BFD will align it for us, but it will not write out the
21596 final bytes of the section. This may be a bug in BFD, but it is
21597 easier to fix it here since that is how the other a.out targets
21601 align
= bfd_get_section_alignment (stdoutput
, segment
);
21602 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
21609 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21610 of an rs_align_code fragment. */
21613 arm_handle_align (fragS
* fragP
)
21615 static unsigned char const arm_noop
[2][2][4] =
21618 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
21619 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
21622 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
21623 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
21626 static unsigned char const thumb_noop
[2][2][2] =
21629 {0xc0, 0x46}, /* LE */
21630 {0x46, 0xc0}, /* BE */
21633 {0x00, 0xbf}, /* LE */
21634 {0xbf, 0x00} /* BE */
21637 static unsigned char const wide_thumb_noop
[2][4] =
21638 { /* Wide Thumb-2 */
21639 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
21640 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
21643 unsigned bytes
, fix
, noop_size
;
21645 const unsigned char * noop
;
21646 const unsigned char *narrow_noop
= NULL
;
21651 if (fragP
->fr_type
!= rs_align_code
)
21654 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
21655 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
21658 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21659 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
21661 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
21663 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
21665 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21666 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
21668 narrow_noop
= thumb_noop
[1][target_big_endian
];
21669 noop
= wide_thumb_noop
[target_big_endian
];
21672 noop
= thumb_noop
[0][target_big_endian
];
21680 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21681 ? selected_cpu
: arm_arch_none
,
21683 [target_big_endian
];
21690 fragP
->fr_var
= noop_size
;
21692 if (bytes
& (noop_size
- 1))
21694 fix
= bytes
& (noop_size
- 1);
21696 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
21698 memset (p
, 0, fix
);
21705 if (bytes
& noop_size
)
21707 /* Insert a narrow noop. */
21708 memcpy (p
, narrow_noop
, noop_size
);
21710 bytes
-= noop_size
;
21714 /* Use wide noops for the remainder */
21718 while (bytes
>= noop_size
)
21720 memcpy (p
, noop
, noop_size
);
21722 bytes
-= noop_size
;
21726 fragP
->fr_fix
+= fix
;
21729 /* Called from md_do_align. Used to create an alignment
21730 frag in a code section. */
21733 arm_frag_align_code (int n
, int max
)
21737 /* We assume that there will never be a requirement
21738 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21739 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21744 _("alignments greater than %d bytes not supported in .text sections."),
21745 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
21746 as_fatal ("%s", err_msg
);
21749 p
= frag_var (rs_align_code
,
21750 MAX_MEM_FOR_RS_ALIGN_CODE
,
21752 (relax_substateT
) max
,
21759 /* Perform target specific initialisation of a frag.
21760 Note - despite the name this initialisation is not done when the frag
21761 is created, but only when its type is assigned. A frag can be created
21762 and used a long time before its type is set, so beware of assuming that
21763 this initialisationis performed first. */
21767 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
21769 /* Record whether this frag is in an ARM or a THUMB area. */
21770 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21773 #else /* OBJ_ELF is defined. */
21775 arm_init_frag (fragS
* fragP
, int max_chars
)
21777 int frag_thumb_mode
;
21779 /* If the current ARM vs THUMB mode has not already
21780 been recorded into this frag then do so now. */
21781 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
21782 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21784 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
21786 /* Record a mapping symbol for alignment frags. We will delete this
21787 later if the alignment ends up empty. */
21788 switch (fragP
->fr_type
)
21791 case rs_align_test
:
21793 mapping_state_2 (MAP_DATA
, max_chars
);
21795 case rs_align_code
:
21796 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
21803 /* When we change sections we need to issue a new mapping symbol. */
21806 arm_elf_change_section (void)
21808 /* Link an unlinked unwind index table section to the .text section. */
21809 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
21810 && elf_linked_to_section (now_seg
) == NULL
)
21811 elf_linked_to_section (now_seg
) = text_section
;
21815 arm_elf_section_type (const char * str
, size_t len
)
21817 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
21818 return SHT_ARM_EXIDX
;
21823 /* Code to deal with unwinding tables. */
21825 static void add_unwind_adjustsp (offsetT
);
21827 /* Generate any deferred unwind frame offset. */
21830 flush_pending_unwind (void)
21834 offset
= unwind
.pending_offset
;
21835 unwind
.pending_offset
= 0;
21837 add_unwind_adjustsp (offset
);
21840 /* Add an opcode to this list for this function. Two-byte opcodes should
21841 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21845 add_unwind_opcode (valueT op
, int length
)
21847 /* Add any deferred stack adjustment. */
21848 if (unwind
.pending_offset
)
21849 flush_pending_unwind ();
21851 unwind
.sp_restored
= 0;
21853 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
21855 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
21856 if (unwind
.opcodes
)
21857 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
21858 unwind
.opcode_alloc
);
21860 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
21865 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
21867 unwind
.opcode_count
++;
21871 /* Add unwind opcodes to adjust the stack pointer. */
21874 add_unwind_adjustsp (offsetT offset
)
21878 if (offset
> 0x200)
21880 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
21885 /* Long form: 0xb2, uleb128. */
21886 /* This might not fit in a word so add the individual bytes,
21887 remembering the list is built in reverse order. */
21888 o
= (valueT
) ((offset
- 0x204) >> 2);
21890 add_unwind_opcode (0, 1);
21892 /* Calculate the uleb128 encoding of the offset. */
21896 bytes
[n
] = o
& 0x7f;
21902 /* Add the insn. */
21904 add_unwind_opcode (bytes
[n
- 1], 1);
21905 add_unwind_opcode (0xb2, 1);
21907 else if (offset
> 0x100)
21909 /* Two short opcodes. */
21910 add_unwind_opcode (0x3f, 1);
21911 op
= (offset
- 0x104) >> 2;
21912 add_unwind_opcode (op
, 1);
21914 else if (offset
> 0)
21916 /* Short opcode. */
21917 op
= (offset
- 4) >> 2;
21918 add_unwind_opcode (op
, 1);
21920 else if (offset
< 0)
21923 while (offset
> 0x100)
21925 add_unwind_opcode (0x7f, 1);
21928 op
= ((offset
- 4) >> 2) | 0x40;
21929 add_unwind_opcode (op
, 1);
21933 /* Finish the list of unwind opcodes for this function. */
21935 finish_unwind_opcodes (void)
21939 if (unwind
.fp_used
)
21941 /* Adjust sp as necessary. */
21942 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
21943 flush_pending_unwind ();
21945 /* After restoring sp from the frame pointer. */
21946 op
= 0x90 | unwind
.fp_reg
;
21947 add_unwind_opcode (op
, 1);
21950 flush_pending_unwind ();
21954 /* Start an exception table entry. If idx is nonzero this is an index table
21958 start_unwind_section (const segT text_seg
, int idx
)
21960 const char * text_name
;
21961 const char * prefix
;
21962 const char * prefix_once
;
21963 const char * group_name
;
21971 prefix
= ELF_STRING_ARM_unwind
;
21972 prefix_once
= ELF_STRING_ARM_unwind_once
;
21973 type
= SHT_ARM_EXIDX
;
21977 prefix
= ELF_STRING_ARM_unwind_info
;
21978 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
21979 type
= SHT_PROGBITS
;
21982 text_name
= segment_name (text_seg
);
21983 if (streq (text_name
, ".text"))
21986 if (strncmp (text_name
, ".gnu.linkonce.t.",
21987 strlen (".gnu.linkonce.t.")) == 0)
21989 prefix
= prefix_once
;
21990 text_name
+= strlen (".gnu.linkonce.t.");
21993 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
21999 /* Handle COMDAT group. */
22000 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
22002 group_name
= elf_group_name (text_seg
);
22003 if (group_name
== NULL
)
22005 as_bad (_("Group section `%s' has no group signature"),
22006 segment_name (text_seg
));
22007 ignore_rest_of_line ();
22010 flags
|= SHF_GROUP
;
22014 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
22016 /* Set the section link for index tables. */
22018 elf_linked_to_section (now_seg
) = text_seg
;
22022 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22023 personality routine data. Returns zero, or the index table value for
22024 an inline entry. */
22027 create_unwind_entry (int have_data
)
22032 /* The current word of data. */
22034 /* The number of bytes left in this word. */
22037 finish_unwind_opcodes ();
22039 /* Remember the current text section. */
22040 unwind
.saved_seg
= now_seg
;
22041 unwind
.saved_subseg
= now_subseg
;
22043 start_unwind_section (now_seg
, 0);
22045 if (unwind
.personality_routine
== NULL
)
22047 if (unwind
.personality_index
== -2)
22050 as_bad (_("handlerdata in cantunwind frame"));
22051 return 1; /* EXIDX_CANTUNWIND. */
22054 /* Use a default personality routine if none is specified. */
22055 if (unwind
.personality_index
== -1)
22057 if (unwind
.opcode_count
> 3)
22058 unwind
.personality_index
= 1;
22060 unwind
.personality_index
= 0;
22063 /* Space for the personality routine entry. */
22064 if (unwind
.personality_index
== 0)
22066 if (unwind
.opcode_count
> 3)
22067 as_bad (_("too many unwind opcodes for personality routine 0"));
22071 /* All the data is inline in the index table. */
22074 while (unwind
.opcode_count
> 0)
22076 unwind
.opcode_count
--;
22077 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
22081 /* Pad with "finish" opcodes. */
22083 data
= (data
<< 8) | 0xb0;
22090 /* We get two opcodes "free" in the first word. */
22091 size
= unwind
.opcode_count
- 2;
22095 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
22096 if (unwind
.personality_index
!= -1)
22098 as_bad (_("attempt to recreate an unwind entry"));
22102 /* An extra byte is required for the opcode count. */
22103 size
= unwind
.opcode_count
+ 1;
22106 size
= (size
+ 3) >> 2;
22108 as_bad (_("too many unwind opcodes"));
22110 frag_align (2, 0, 0);
22111 record_alignment (now_seg
, 2);
22112 unwind
.table_entry
= expr_build_dot ();
22114 /* Allocate the table entry. */
22115 ptr
= frag_more ((size
<< 2) + 4);
22116 /* PR 13449: Zero the table entries in case some of them are not used. */
22117 memset (ptr
, 0, (size
<< 2) + 4);
22118 where
= frag_now_fix () - ((size
<< 2) + 4);
22120 switch (unwind
.personality_index
)
22123 /* ??? Should this be a PLT generating relocation? */
22124 /* Custom personality routine. */
22125 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
22126 BFD_RELOC_ARM_PREL31
);
22131 /* Set the first byte to the number of additional words. */
22132 data
= size
> 0 ? size
- 1 : 0;
22136 /* ABI defined personality routines. */
22138 /* Three opcodes bytes are packed into the first word. */
22145 /* The size and first two opcode bytes go in the first word. */
22146 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
22151 /* Should never happen. */
22155 /* Pack the opcodes into words (MSB first), reversing the list at the same
22157 while (unwind
.opcode_count
> 0)
22161 md_number_to_chars (ptr
, data
, 4);
22166 unwind
.opcode_count
--;
22168 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
22171 /* Finish off the last word. */
22174 /* Pad with "finish" opcodes. */
22176 data
= (data
<< 8) | 0xb0;
22178 md_number_to_chars (ptr
, data
, 4);
22183 /* Add an empty descriptor if there is no user-specified data. */
22184 ptr
= frag_more (4);
22185 md_number_to_chars (ptr
, 0, 4);
22192 /* Initialize the DWARF-2 unwind information for this procedure. */
22195 tc_arm_frame_initial_instructions (void)
22197 cfi_add_CFA_def_cfa (REG_SP
, 0);
22199 #endif /* OBJ_ELF */
22201 /* Convert REGNAME to a DWARF-2 register number. */
22204 tc_arm_regname_to_dw2regnum (char *regname
)
22206 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
22210 /* PR 16694: Allow VFP registers as well. */
22211 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
22215 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
22224 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
22228 exp
.X_op
= O_secrel
;
22229 exp
.X_add_symbol
= symbol
;
22230 exp
.X_add_number
= 0;
22231 emit_expr (&exp
, size
);
22235 /* MD interface: Symbol and relocation handling. */
22237 /* Return the address within the segment that a PC-relative fixup is
22238 relative to. For ARM, PC-relative fixups applied to instructions
22239 are generally relative to the location of the fixup plus 8 bytes.
22240 Thumb branches are offset by 4, and Thumb loads relative to PC
22241 require special handling. */
22244 md_pcrel_from_section (fixS
* fixP
, segT seg
)
22246 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22248 /* If this is pc-relative and we are going to emit a relocation
22249 then we just want to put out any pipeline compensation that the linker
22250 will need. Otherwise we want to use the calculated base.
22251 For WinCE we skip the bias for externals as well, since this
22252 is how the MS ARM-CE assembler behaves and we want to be compatible. */
22254 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22255 || (arm_force_relocation (fixP
)
22257 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
22263 switch (fixP
->fx_r_type
)
22265 /* PC relative addressing on the Thumb is slightly odd as the
22266 bottom two bits of the PC are forced to zero for the
22267 calculation. This happens *after* application of the
22268 pipeline offset. However, Thumb adrl already adjusts for
22269 this, so we need not do it again. */
22270 case BFD_RELOC_ARM_THUMB_ADD
:
22273 case BFD_RELOC_ARM_THUMB_OFFSET
:
22274 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22275 case BFD_RELOC_ARM_T32_ADD_PC12
:
22276 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
22277 return (base
+ 4) & ~3;
22279 /* Thumb branches are simply offset by +4. */
22280 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
22281 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
22282 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
22283 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
22284 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
22287 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
22289 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22290 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22291 && ARM_IS_FUNC (fixP
->fx_addsy
)
22292 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22293 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22296 /* BLX is like branches above, but forces the low two bits of PC to
22298 case BFD_RELOC_THUMB_PCREL_BLX
:
22300 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22301 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22302 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22303 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22304 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22305 return (base
+ 4) & ~3;
22307 /* ARM mode branches are offset by +8. However, the Windows CE
22308 loader expects the relocation not to take this into account. */
22309 case BFD_RELOC_ARM_PCREL_BLX
:
22311 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22312 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22313 && ARM_IS_FUNC (fixP
->fx_addsy
)
22314 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22315 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22318 case BFD_RELOC_ARM_PCREL_CALL
:
22320 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22321 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22322 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22323 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22324 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22327 case BFD_RELOC_ARM_PCREL_BRANCH
:
22328 case BFD_RELOC_ARM_PCREL_JUMP
:
22329 case BFD_RELOC_ARM_PLT32
:
22331 /* When handling fixups immediately, because we have already
22332 discovered the value of a symbol, or the address of the frag involved
22333 we must account for the offset by +8, as the OS loader will never see the reloc.
22334 see fixup_segment() in write.c
22335 The S_IS_EXTERNAL test handles the case of global symbols.
22336 Those need the calculated base, not just the pipe compensation the linker will need. */
22338 && fixP
->fx_addsy
!= NULL
22339 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22340 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
22348 /* ARM mode loads relative to PC are also offset by +8. Unlike
22349 branches, the Windows CE loader *does* expect the relocation
22350 to take this into account. */
22351 case BFD_RELOC_ARM_OFFSET_IMM
:
22352 case BFD_RELOC_ARM_OFFSET_IMM8
:
22353 case BFD_RELOC_ARM_HWLITERAL
:
22354 case BFD_RELOC_ARM_LITERAL
:
22355 case BFD_RELOC_ARM_CP_OFF_IMM
:
22359 /* Other PC-relative relocations are un-offset. */
22365 static bfd_boolean flag_warn_syms
= TRUE
;
22368 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
22370 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22371 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22372 does mean that the resulting code might be very confusing to the reader.
22373 Also this warning can be triggered if the user omits an operand before
22374 an immediate address, eg:
22378 GAS treats this as an assignment of the value of the symbol foo to a
22379 symbol LDR, and so (without this code) it will not issue any kind of
22380 warning or error message.
22382 Note - ARM instructions are case-insensitive but the strings in the hash
22383 table are all stored in lower case, so we must first ensure that name is
22385 if (flag_warn_syms
&& arm_ops_hsh
)
22387 char * nbuf
= strdup (name
);
22390 for (p
= nbuf
; *p
; p
++)
22392 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
22394 static struct hash_control
* already_warned
= NULL
;
22396 if (already_warned
== NULL
)
22397 already_warned
= hash_new ();
22398 /* Only warn about the symbol once. To keep the code
22399 simple we let hash_insert do the lookup for us. */
22400 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
22401 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
22410 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22411 Otherwise we have no need to default values of symbols. */
22414 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
22417 if (name
[0] == '_' && name
[1] == 'G'
22418 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
22422 if (symbol_find (name
))
22423 as_bad (_("GOT already in the symbol table"));
22425 GOT_symbol
= symbol_new (name
, undefined_section
,
22426 (valueT
) 0, & zero_address_frag
);
22436 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22437 computed as two separate immediate values, added together. We
22438 already know that this value cannot be computed by just one ARM
22441 static unsigned int
22442 validate_immediate_twopart (unsigned int val
,
22443 unsigned int * highpart
)
22448 for (i
= 0; i
< 32; i
+= 2)
22449 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
22455 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
22457 else if (a
& 0xff0000)
22459 if (a
& 0xff000000)
22461 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
22465 gas_assert (a
& 0xff000000);
22466 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
22469 return (a
& 0xff) | (i
<< 7);
22476 validate_offset_imm (unsigned int val
, int hwse
)
22478 if ((hwse
&& val
> 255) || val
> 4095)
22483 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22484 negative immediate constant by altering the instruction. A bit of
22489 by inverting the second operand, and
22492 by negating the second operand. */
22495 negate_data_op (unsigned long * instruction
,
22496 unsigned long value
)
22499 unsigned long negated
, inverted
;
22501 negated
= encode_arm_immediate (-value
);
22502 inverted
= encode_arm_immediate (~value
);
22504 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
22507 /* First negates. */
22508 case OPCODE_SUB
: /* ADD <-> SUB */
22509 new_inst
= OPCODE_ADD
;
22514 new_inst
= OPCODE_SUB
;
22518 case OPCODE_CMP
: /* CMP <-> CMN */
22519 new_inst
= OPCODE_CMN
;
22524 new_inst
= OPCODE_CMP
;
22528 /* Now Inverted ops. */
22529 case OPCODE_MOV
: /* MOV <-> MVN */
22530 new_inst
= OPCODE_MVN
;
22535 new_inst
= OPCODE_MOV
;
22539 case OPCODE_AND
: /* AND <-> BIC */
22540 new_inst
= OPCODE_BIC
;
22545 new_inst
= OPCODE_AND
;
22549 case OPCODE_ADC
: /* ADC <-> SBC */
22550 new_inst
= OPCODE_SBC
;
22555 new_inst
= OPCODE_ADC
;
22559 /* We cannot do anything. */
22564 if (value
== (unsigned) FAIL
)
22567 *instruction
&= OPCODE_MASK
;
22568 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
22572 /* Like negate_data_op, but for Thumb-2. */
22574 static unsigned int
22575 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
22579 unsigned int negated
, inverted
;
22581 negated
= encode_thumb32_immediate (-value
);
22582 inverted
= encode_thumb32_immediate (~value
);
22584 rd
= (*instruction
>> 8) & 0xf;
22585 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
22588 /* ADD <-> SUB. Includes CMP <-> CMN. */
22589 case T2_OPCODE_SUB
:
22590 new_inst
= T2_OPCODE_ADD
;
22594 case T2_OPCODE_ADD
:
22595 new_inst
= T2_OPCODE_SUB
;
22599 /* ORR <-> ORN. Includes MOV <-> MVN. */
22600 case T2_OPCODE_ORR
:
22601 new_inst
= T2_OPCODE_ORN
;
22605 case T2_OPCODE_ORN
:
22606 new_inst
= T2_OPCODE_ORR
;
22610 /* AND <-> BIC. TST has no inverted equivalent. */
22611 case T2_OPCODE_AND
:
22612 new_inst
= T2_OPCODE_BIC
;
22619 case T2_OPCODE_BIC
:
22620 new_inst
= T2_OPCODE_AND
;
22625 case T2_OPCODE_ADC
:
22626 new_inst
= T2_OPCODE_SBC
;
22630 case T2_OPCODE_SBC
:
22631 new_inst
= T2_OPCODE_ADC
;
22635 /* We cannot do anything. */
22640 if (value
== (unsigned int)FAIL
)
22643 *instruction
&= T2_OPCODE_MASK
;
22644 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
22648 /* Read a 32-bit thumb instruction from buf. */
22649 static unsigned long
22650 get_thumb32_insn (char * buf
)
22652 unsigned long insn
;
22653 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
22654 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22660 /* We usually want to set the low bit on the address of thumb function
22661 symbols. In particular .word foo - . should have the low bit set.
22662 Generic code tries to fold the difference of two symbols to
22663 a constant. Prevent this and force a relocation when the first symbols
22664 is a thumb function. */
22667 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
22669 if (op
== O_subtract
22670 && l
->X_op
== O_symbol
22671 && r
->X_op
== O_symbol
22672 && THUMB_IS_FUNC (l
->X_add_symbol
))
22674 l
->X_op
= O_subtract
;
22675 l
->X_op_symbol
= r
->X_add_symbol
;
22676 l
->X_add_number
-= r
->X_add_number
;
22680 /* Process as normal. */
22684 /* Encode Thumb2 unconditional branches and calls. The encoding
22685 for the 2 are identical for the immediate values. */
22688 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
22690 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22693 addressT S
, I1
, I2
, lo
, hi
;
22695 S
= (value
>> 24) & 0x01;
22696 I1
= (value
>> 23) & 0x01;
22697 I2
= (value
>> 22) & 0x01;
22698 hi
= (value
>> 12) & 0x3ff;
22699 lo
= (value
>> 1) & 0x7ff;
22700 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22701 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22702 newval
|= (S
<< 10) | hi
;
22703 newval2
&= ~T2I1I2MASK
;
22704 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
22705 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22706 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22710 md_apply_fix (fixS
* fixP
,
22714 offsetT value
= * valP
;
22716 unsigned int newimm
;
22717 unsigned long temp
;
22719 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
22721 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
22723 /* Note whether this will delete the relocation. */
22725 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
22728 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22729 consistency with the behaviour on 32-bit hosts. Remember value
22731 value
&= 0xffffffff;
22732 value
^= 0x80000000;
22733 value
-= 0x80000000;
22736 fixP
->fx_addnumber
= value
;
22738 /* Same treatment for fixP->fx_offset. */
22739 fixP
->fx_offset
&= 0xffffffff;
22740 fixP
->fx_offset
^= 0x80000000;
22741 fixP
->fx_offset
-= 0x80000000;
22743 switch (fixP
->fx_r_type
)
22745 case BFD_RELOC_NONE
:
22746 /* This will need to go in the object file. */
22750 case BFD_RELOC_ARM_IMMEDIATE
:
22751 /* We claim that this fixup has been processed here,
22752 even if in fact we generate an error because we do
22753 not have a reloc for it, so tc_gen_reloc will reject it. */
22756 if (fixP
->fx_addsy
)
22758 const char *msg
= 0;
22760 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22761 msg
= _("undefined symbol %s used as an immediate value");
22762 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22763 msg
= _("symbol %s is in a different section");
22764 else if (S_IS_WEAK (fixP
->fx_addsy
))
22765 msg
= _("symbol %s is weak and may be overridden later");
22769 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22770 msg
, S_GET_NAME (fixP
->fx_addsy
));
22775 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22777 /* If the offset is negative, we should use encoding A2 for ADR. */
22778 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
22779 newimm
= negate_data_op (&temp
, value
);
22782 newimm
= encode_arm_immediate (value
);
22784 /* If the instruction will fail, see if we can fix things up by
22785 changing the opcode. */
22786 if (newimm
== (unsigned int) FAIL
)
22787 newimm
= negate_data_op (&temp
, value
);
22788 /* MOV accepts both ARM modified immediate (A1 encoding) and
22789 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
22790 When disassembling, MOV is preferred when there is no encoding
22792 if (newimm
== (unsigned int) FAIL
22793 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
22794 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
22795 && !((temp
>> SBIT_SHIFT
) & 0x1)
22796 && value
>= 0 && value
<= 0xffff)
22798 /* Clear bits[23:20] to change encoding from A1 to A2. */
22799 temp
&= 0xff0fffff;
22800 /* Encoding high 4bits imm. Code below will encode the remaining
22802 temp
|= (value
& 0x0000f000) << 4;
22803 newimm
= value
& 0x00000fff;
22807 if (newimm
== (unsigned int) FAIL
)
22809 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22810 _("invalid constant (%lx) after fixup"),
22811 (unsigned long) value
);
22815 newimm
|= (temp
& 0xfffff000);
22816 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22819 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
22821 unsigned int highpart
= 0;
22822 unsigned int newinsn
= 0xe1a00000; /* nop. */
22824 if (fixP
->fx_addsy
)
22826 const char *msg
= 0;
22828 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22829 msg
= _("undefined symbol %s used as an immediate value");
22830 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22831 msg
= _("symbol %s is in a different section");
22832 else if (S_IS_WEAK (fixP
->fx_addsy
))
22833 msg
= _("symbol %s is weak and may be overridden later");
22837 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22838 msg
, S_GET_NAME (fixP
->fx_addsy
));
22843 newimm
= encode_arm_immediate (value
);
22844 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22846 /* If the instruction will fail, see if we can fix things up by
22847 changing the opcode. */
22848 if (newimm
== (unsigned int) FAIL
22849 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
22851 /* No ? OK - try using two ADD instructions to generate
22853 newimm
= validate_immediate_twopart (value
, & highpart
);
22855 /* Yes - then make sure that the second instruction is
22857 if (newimm
!= (unsigned int) FAIL
)
22859 /* Still No ? Try using a negated value. */
22860 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
22861 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
22862 /* Otherwise - give up. */
22865 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22866 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22871 /* Replace the first operand in the 2nd instruction (which
22872 is the PC) with the destination register. We have
22873 already added in the PC in the first instruction and we
22874 do not want to do it again. */
22875 newinsn
&= ~ 0xf0000;
22876 newinsn
|= ((newinsn
& 0x0f000) << 4);
22879 newimm
|= (temp
& 0xfffff000);
22880 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22882 highpart
|= (newinsn
& 0xfffff000);
22883 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
22887 case BFD_RELOC_ARM_OFFSET_IMM
:
22888 if (!fixP
->fx_done
&& seg
->use_rela_p
)
22890 /* Fall through. */
22892 case BFD_RELOC_ARM_LITERAL
:
22898 if (validate_offset_imm (value
, 0) == FAIL
)
22900 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
22901 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22902 _("invalid literal constant: pool needs to be closer"));
22904 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22905 _("bad immediate value for offset (%ld)"),
22910 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22912 newval
&= 0xfffff000;
22915 newval
&= 0xff7ff000;
22916 newval
|= value
| (sign
? INDEX_UP
: 0);
22918 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22921 case BFD_RELOC_ARM_OFFSET_IMM8
:
22922 case BFD_RELOC_ARM_HWLITERAL
:
22928 if (validate_offset_imm (value
, 1) == FAIL
)
22930 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
22931 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22932 _("invalid literal constant: pool needs to be closer"));
22934 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22935 _("bad immediate value for 8-bit offset (%ld)"),
22940 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22942 newval
&= 0xfffff0f0;
22945 newval
&= 0xff7ff0f0;
22946 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
22948 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22951 case BFD_RELOC_ARM_T32_OFFSET_U8
:
22952 if (value
< 0 || value
> 1020 || value
% 4 != 0)
22953 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22954 _("bad immediate value for offset (%ld)"), (long) value
);
22957 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
22959 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
22962 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22963 /* This is a complicated relocation used for all varieties of Thumb32
22964 load/store instruction with immediate offset:
22966 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22967 *4, optional writeback(W)
22968 (doubleword load/store)
22970 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22971 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22972 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22973 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22974 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22976 Uppercase letters indicate bits that are already encoded at
22977 this point. Lowercase letters are our problem. For the
22978 second block of instructions, the secondary opcode nybble
22979 (bits 8..11) is present, and bit 23 is zero, even if this is
22980 a PC-relative operation. */
22981 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22983 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
22985 if ((newval
& 0xf0000000) == 0xe0000000)
22987 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22989 newval
|= (1 << 23);
22992 if (value
% 4 != 0)
22994 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22995 _("offset not a multiple of 4"));
23001 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23002 _("offset out of range"));
23007 else if ((newval
& 0x000f0000) == 0x000f0000)
23009 /* PC-relative, 12-bit offset. */
23011 newval
|= (1 << 23);
23016 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23017 _("offset out of range"));
23022 else if ((newval
& 0x00000100) == 0x00000100)
23024 /* Writeback: 8-bit, +/- offset. */
23026 newval
|= (1 << 9);
23031 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23032 _("offset out of range"));
23037 else if ((newval
& 0x00000f00) == 0x00000e00)
23039 /* T-instruction: positive 8-bit offset. */
23040 if (value
< 0 || value
> 0xff)
23042 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23043 _("offset out of range"));
23051 /* Positive 12-bit or negative 8-bit offset. */
23055 newval
|= (1 << 23);
23065 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23066 _("offset out of range"));
23073 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
23074 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
23077 case BFD_RELOC_ARM_SHIFT_IMM
:
23078 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23079 if (((unsigned long) value
) > 32
23081 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
23083 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23084 _("shift expression is too large"));
23089 /* Shifts of zero must be done as lsl. */
23091 else if (value
== 32)
23093 newval
&= 0xfffff07f;
23094 newval
|= (value
& 0x1f) << 7;
23095 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23098 case BFD_RELOC_ARM_T32_IMMEDIATE
:
23099 case BFD_RELOC_ARM_T32_ADD_IMM
:
23100 case BFD_RELOC_ARM_T32_IMM12
:
23101 case BFD_RELOC_ARM_T32_ADD_PC12
:
23102 /* We claim that this fixup has been processed here,
23103 even if in fact we generate an error because we do
23104 not have a reloc for it, so tc_gen_reloc will reject it. */
23108 && ! S_IS_DEFINED (fixP
->fx_addsy
))
23110 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23111 _("undefined symbol %s used as an immediate value"),
23112 S_GET_NAME (fixP
->fx_addsy
));
23116 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23118 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
23121 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
23122 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23123 Thumb2 modified immediate encoding (T2). */
23124 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
23125 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23127 newimm
= encode_thumb32_immediate (value
);
23128 if (newimm
== (unsigned int) FAIL
)
23129 newimm
= thumb32_negate_data_op (&newval
, value
);
23131 if (newimm
== (unsigned int) FAIL
)
23133 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
23135 /* Turn add/sum into addw/subw. */
23136 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23137 newval
= (newval
& 0xfeffffff) | 0x02000000;
23138 /* No flat 12-bit imm encoding for addsw/subsw. */
23139 if ((newval
& 0x00100000) == 0)
23141 /* 12 bit immediate for addw/subw. */
23145 newval
^= 0x00a00000;
23148 newimm
= (unsigned int) FAIL
;
23155 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23156 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23157 disassembling, MOV is preferred when there is no encoding
23159 NOTE: MOV is using ORR opcode under Thumb 2 mode. */
23160 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
23161 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
23162 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
23163 && value
>= 0 && value
<=0xffff)
23165 /* Toggle bit[25] to change encoding from T2 to T3. */
23167 /* Clear bits[19:16]. */
23168 newval
&= 0xfff0ffff;
23169 /* Encoding high 4bits imm. Code below will encode the
23170 remaining low 12bits. */
23171 newval
|= (value
& 0x0000f000) << 4;
23172 newimm
= value
& 0x00000fff;
23177 if (newimm
== (unsigned int)FAIL
)
23179 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23180 _("invalid constant (%lx) after fixup"),
23181 (unsigned long) value
);
23185 newval
|= (newimm
& 0x800) << 15;
23186 newval
|= (newimm
& 0x700) << 4;
23187 newval
|= (newimm
& 0x0ff);
23189 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
23190 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
23193 case BFD_RELOC_ARM_SMC
:
23194 if (((unsigned long) value
) > 0xffff)
23195 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23196 _("invalid smc expression"));
23197 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23198 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
23199 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23202 case BFD_RELOC_ARM_HVC
:
23203 if (((unsigned long) value
) > 0xffff)
23204 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23205 _("invalid hvc expression"));
23206 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23207 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
23208 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23211 case BFD_RELOC_ARM_SWI
:
23212 if (fixP
->tc_fix_data
!= 0)
23214 if (((unsigned long) value
) > 0xff)
23215 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23216 _("invalid swi expression"));
23217 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23219 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23223 if (((unsigned long) value
) > 0x00ffffff)
23224 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23225 _("invalid swi expression"));
23226 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23228 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23232 case BFD_RELOC_ARM_MULTI
:
23233 if (((unsigned long) value
) > 0xffff)
23234 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23235 _("invalid expression in load/store multiple"));
23236 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
23237 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23241 case BFD_RELOC_ARM_PCREL_CALL
:
23243 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23245 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23246 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23247 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23248 /* Flip the bl to blx. This is a simple flip
23249 bit here because we generate PCREL_CALL for
23250 unconditional bls. */
23252 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23253 newval
= newval
| 0x10000000;
23254 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23260 goto arm_branch_common
;
23262 case BFD_RELOC_ARM_PCREL_JUMP
:
23263 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23265 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23266 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23267 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23269 /* This would map to a bl<cond>, b<cond>,
23270 b<always> to a Thumb function. We
23271 need to force a relocation for this particular
23273 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23276 /* Fall through. */
23278 case BFD_RELOC_ARM_PLT32
:
23280 case BFD_RELOC_ARM_PCREL_BRANCH
:
23282 goto arm_branch_common
;
23284 case BFD_RELOC_ARM_PCREL_BLX
:
23287 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23289 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23290 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23291 && ARM_IS_FUNC (fixP
->fx_addsy
))
23293 /* Flip the blx to a bl and warn. */
23294 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23295 newval
= 0xeb000000;
23296 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23297 _("blx to '%s' an ARM ISA state function changed to bl"),
23299 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23305 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23306 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
23310 /* We are going to store value (shifted right by two) in the
23311 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23312 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23313 also be be clear. */
23315 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23316 _("misaligned branch destination"));
23317 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
23318 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
23319 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23321 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23323 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23324 newval
|= (value
>> 2) & 0x00ffffff;
23325 /* Set the H bit on BLX instructions. */
23329 newval
|= 0x01000000;
23331 newval
&= ~0x01000000;
23333 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23337 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
23338 /* CBZ can only branch forward. */
23340 /* Attempts to use CBZ to branch to the next instruction
23341 (which, strictly speaking, are prohibited) will be turned into
23344 FIXME: It may be better to remove the instruction completely and
23345 perform relaxation. */
23348 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23349 newval
= 0xbf00; /* NOP encoding T1 */
23350 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23355 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23357 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23359 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23360 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
23361 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23366 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
23367 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
23368 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23370 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23372 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23373 newval
|= (value
& 0x1ff) >> 1;
23374 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23378 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
23379 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
23380 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23382 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23384 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23385 newval
|= (value
& 0xfff) >> 1;
23386 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23390 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23392 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23393 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23394 && ARM_IS_FUNC (fixP
->fx_addsy
)
23395 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23397 /* Force a relocation for a branch 20 bits wide. */
23400 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
23401 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23402 _("conditional branch out of range"));
23404 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23407 addressT S
, J1
, J2
, lo
, hi
;
23409 S
= (value
& 0x00100000) >> 20;
23410 J2
= (value
& 0x00080000) >> 19;
23411 J1
= (value
& 0x00040000) >> 18;
23412 hi
= (value
& 0x0003f000) >> 12;
23413 lo
= (value
& 0x00000ffe) >> 1;
23415 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23416 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23417 newval
|= (S
<< 10) | hi
;
23418 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
23419 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23420 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
23424 case BFD_RELOC_THUMB_PCREL_BLX
:
23425 /* If there is a blx from a thumb state function to
23426 another thumb function flip this to a bl and warn
23430 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23431 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23432 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23434 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23435 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23436 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23438 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23439 newval
= newval
| 0x1000;
23440 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23441 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23446 goto thumb_bl_common
;
23448 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23449 /* A bl from Thumb state ISA to an internal ARM state function
23450 is converted to a blx. */
23452 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23453 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23454 && ARM_IS_FUNC (fixP
->fx_addsy
)
23455 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23457 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23458 newval
= newval
& ~0x1000;
23459 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23460 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
23466 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23467 /* For a BLX instruction, make sure that the relocation is rounded up
23468 to a word boundary. This follows the semantics of the instruction
23469 which specifies that bit 1 of the target address will come from bit
23470 1 of the base address. */
23471 value
= (value
+ 3) & ~ 3;
23474 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
23475 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23476 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23479 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
23481 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
23482 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23483 else if ((value
& ~0x1ffffff)
23484 && ((value
& ~0x1ffffff) != ~0x1ffffff))
23485 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23486 _("Thumb2 branch out of range"));
23489 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23490 encode_thumb2_b_bl_offset (buf
, value
);
23494 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23495 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
23496 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23498 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23499 encode_thumb2_b_bl_offset (buf
, value
);
23504 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23509 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23510 md_number_to_chars (buf
, value
, 2);
23514 case BFD_RELOC_ARM_TLS_CALL
:
23515 case BFD_RELOC_ARM_THM_TLS_CALL
:
23516 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23517 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23518 case BFD_RELOC_ARM_TLS_GOTDESC
:
23519 case BFD_RELOC_ARM_TLS_GD32
:
23520 case BFD_RELOC_ARM_TLS_LE32
:
23521 case BFD_RELOC_ARM_TLS_IE32
:
23522 case BFD_RELOC_ARM_TLS_LDM32
:
23523 case BFD_RELOC_ARM_TLS_LDO32
:
23524 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
23527 case BFD_RELOC_ARM_GOT32
:
23528 case BFD_RELOC_ARM_GOTOFF
:
23531 case BFD_RELOC_ARM_GOT_PREL
:
23532 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23533 md_number_to_chars (buf
, value
, 4);
23536 case BFD_RELOC_ARM_TARGET2
:
23537 /* TARGET2 is not partial-inplace, so we need to write the
23538 addend here for REL targets, because it won't be written out
23539 during reloc processing later. */
23540 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23541 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
23545 case BFD_RELOC_RVA
:
23547 case BFD_RELOC_ARM_TARGET1
:
23548 case BFD_RELOC_ARM_ROSEGREL32
:
23549 case BFD_RELOC_ARM_SBREL32
:
23550 case BFD_RELOC_32_PCREL
:
23552 case BFD_RELOC_32_SECREL
:
23554 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23556 /* For WinCE we only do this for pcrel fixups. */
23557 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
23559 md_number_to_chars (buf
, value
, 4);
23563 case BFD_RELOC_ARM_PREL31
:
23564 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23566 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
23567 if ((value
^ (value
>> 1)) & 0x40000000)
23569 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23570 _("rel31 relocation overflow"));
23572 newval
|= value
& 0x7fffffff;
23573 md_number_to_chars (buf
, newval
, 4);
23578 case BFD_RELOC_ARM_CP_OFF_IMM
:
23579 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23580 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
23581 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23583 newval
= get_thumb32_insn (buf
);
23584 if ((newval
& 0x0f200f00) == 0x0d000900)
23586 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23587 has permitted values that are multiples of 2, in the range 0
23589 if (value
< -510 || value
> 510 || (value
& 1))
23590 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23591 _("co-processor offset out of range"));
23593 else if (value
< -1023 || value
> 1023 || (value
& 3))
23594 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23595 _("co-processor offset out of range"));
23600 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23601 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23602 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23604 newval
= get_thumb32_insn (buf
);
23606 newval
&= 0xffffff00;
23609 newval
&= 0xff7fff00;
23610 if ((newval
& 0x0f200f00) == 0x0d000900)
23612 /* This is a fp16 vstr/vldr.
23614 It requires the immediate offset in the instruction is shifted
23615 left by 1 to be a half-word offset.
23617 Here, left shift by 1 first, and later right shift by 2
23618 should get the right offset. */
23621 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
23623 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23624 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23625 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23627 put_thumb32_insn (buf
, newval
);
23630 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
23631 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
23632 if (value
< -255 || value
> 255)
23633 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23634 _("co-processor offset out of range"));
23636 goto cp_off_common
;
23638 case BFD_RELOC_ARM_THUMB_OFFSET
:
23639 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23640 /* Exactly what ranges, and where the offset is inserted depends
23641 on the type of instruction, we can establish this from the
23643 switch (newval
>> 12)
23645 case 4: /* PC load. */
23646 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23647 forced to zero for these loads; md_pcrel_from has already
23648 compensated for this. */
23650 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23651 _("invalid offset, target not word aligned (0x%08lX)"),
23652 (((unsigned long) fixP
->fx_frag
->fr_address
23653 + (unsigned long) fixP
->fx_where
) & ~3)
23654 + (unsigned long) value
);
23656 if (value
& ~0x3fc)
23657 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23658 _("invalid offset, value too big (0x%08lX)"),
23661 newval
|= value
>> 2;
23664 case 9: /* SP load/store. */
23665 if (value
& ~0x3fc)
23666 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23667 _("invalid offset, value too big (0x%08lX)"),
23669 newval
|= value
>> 2;
23672 case 6: /* Word load/store. */
23674 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23675 _("invalid offset, value too big (0x%08lX)"),
23677 newval
|= value
<< 4; /* 6 - 2. */
23680 case 7: /* Byte load/store. */
23682 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23683 _("invalid offset, value too big (0x%08lX)"),
23685 newval
|= value
<< 6;
23688 case 8: /* Halfword load/store. */
23690 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23691 _("invalid offset, value too big (0x%08lX)"),
23693 newval
|= value
<< 5; /* 6 - 1. */
23697 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23698 "Unable to process relocation for thumb opcode: %lx",
23699 (unsigned long) newval
);
23702 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23705 case BFD_RELOC_ARM_THUMB_ADD
:
23706 /* This is a complicated relocation, since we use it for all of
23707 the following immediate relocations:
23711 9bit ADD/SUB SP word-aligned
23712 10bit ADD PC/SP word-aligned
23714 The type of instruction being processed is encoded in the
23721 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23723 int rd
= (newval
>> 4) & 0xf;
23724 int rs
= newval
& 0xf;
23725 int subtract
= !!(newval
& 0x8000);
23727 /* Check for HI regs, only very restricted cases allowed:
23728 Adjusting SP, and using PC or SP to get an address. */
23729 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
23730 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
23731 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23732 _("invalid Hi register with immediate"));
23734 /* If value is negative, choose the opposite instruction. */
23738 subtract
= !subtract
;
23740 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23741 _("immediate value out of range"));
23746 if (value
& ~0x1fc)
23747 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23748 _("invalid immediate for stack address calculation"));
23749 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
23750 newval
|= value
>> 2;
23752 else if (rs
== REG_PC
|| rs
== REG_SP
)
23754 /* PR gas/18541. If the addition is for a defined symbol
23755 within range of an ADR instruction then accept it. */
23758 && fixP
->fx_addsy
!= NULL
)
23762 if (! S_IS_DEFINED (fixP
->fx_addsy
)
23763 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
23764 || S_IS_WEAK (fixP
->fx_addsy
))
23766 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23767 _("address calculation needs a strongly defined nearby symbol"));
23771 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23773 /* Round up to the next 4-byte boundary. */
23778 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
23782 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23783 _("symbol too far away"));
23793 if (subtract
|| value
& ~0x3fc)
23794 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23795 _("invalid immediate for address calculation (value = 0x%08lX)"),
23796 (unsigned long) (subtract
? - value
: value
));
23797 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
23799 newval
|= value
>> 2;
23804 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23805 _("immediate value out of range"));
23806 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
23807 newval
|= (rd
<< 8) | value
;
23812 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23813 _("immediate value out of range"));
23814 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
23815 newval
|= rd
| (rs
<< 3) | (value
<< 6);
23818 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23821 case BFD_RELOC_ARM_THUMB_IMM
:
23822 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23823 if (value
< 0 || value
> 255)
23824 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23825 _("invalid immediate: %ld is out of range"),
23828 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23831 case BFD_RELOC_ARM_THUMB_SHIFT
:
23832 /* 5bit shift value (0..32). LSL cannot take 32. */
23833 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
23834 temp
= newval
& 0xf800;
23835 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
23836 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23837 _("invalid shift value: %ld"), (long) value
);
23838 /* Shifts of zero must be encoded as LSL. */
23840 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
23841 /* Shifts of 32 are encoded as zero. */
23842 else if (value
== 32)
23844 newval
|= value
<< 6;
23845 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23848 case BFD_RELOC_VTABLE_INHERIT
:
23849 case BFD_RELOC_VTABLE_ENTRY
:
23853 case BFD_RELOC_ARM_MOVW
:
23854 case BFD_RELOC_ARM_MOVT
:
23855 case BFD_RELOC_ARM_THUMB_MOVW
:
23856 case BFD_RELOC_ARM_THUMB_MOVT
:
23857 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23859 /* REL format relocations are limited to a 16-bit addend. */
23860 if (!fixP
->fx_done
)
23862 if (value
< -0x8000 || value
> 0x7fff)
23863 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23864 _("offset out of range"));
23866 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
23867 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23872 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
23873 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23875 newval
= get_thumb32_insn (buf
);
23876 newval
&= 0xfbf08f00;
23877 newval
|= (value
& 0xf000) << 4;
23878 newval
|= (value
& 0x0800) << 15;
23879 newval
|= (value
& 0x0700) << 4;
23880 newval
|= (value
& 0x00ff);
23881 put_thumb32_insn (buf
, newval
);
23885 newval
= md_chars_to_number (buf
, 4);
23886 newval
&= 0xfff0f000;
23887 newval
|= value
& 0x0fff;
23888 newval
|= (value
& 0xf000) << 4;
23889 md_number_to_chars (buf
, newval
, 4);
23894 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23895 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23896 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23897 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23898 gas_assert (!fixP
->fx_done
);
23901 bfd_boolean is_mov
;
23902 bfd_vma encoded_addend
= value
;
23904 /* Check that addend can be encoded in instruction. */
23905 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
23906 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23907 _("the offset 0x%08lX is not representable"),
23908 (unsigned long) encoded_addend
);
23910 /* Extract the instruction. */
23911 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
23912 is_mov
= (insn
& 0xf800) == 0x2000;
23917 if (!seg
->use_rela_p
)
23918 insn
|= encoded_addend
;
23924 /* Extract the instruction. */
23925 /* Encoding is the following
23930 /* The following conditions must be true :
23935 rd
= (insn
>> 4) & 0xf;
23937 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
23938 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23939 _("Unable to process relocation for thumb opcode: %lx"),
23940 (unsigned long) insn
);
23942 /* Encode as ADD immediate8 thumb 1 code. */
23943 insn
= 0x3000 | (rd
<< 8);
23945 /* Place the encoded addend into the first 8 bits of the
23947 if (!seg
->use_rela_p
)
23948 insn
|= encoded_addend
;
23951 /* Update the instruction. */
23952 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
23956 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23957 case BFD_RELOC_ARM_ALU_PC_G0
:
23958 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23959 case BFD_RELOC_ARM_ALU_PC_G1
:
23960 case BFD_RELOC_ARM_ALU_PC_G2
:
23961 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23962 case BFD_RELOC_ARM_ALU_SB_G0
:
23963 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23964 case BFD_RELOC_ARM_ALU_SB_G1
:
23965 case BFD_RELOC_ARM_ALU_SB_G2
:
23966 gas_assert (!fixP
->fx_done
);
23967 if (!seg
->use_rela_p
)
23970 bfd_vma encoded_addend
;
23971 bfd_vma addend_abs
= abs (value
);
23973 /* Check that the absolute value of the addend can be
23974 expressed as an 8-bit constant plus a rotation. */
23975 encoded_addend
= encode_arm_immediate (addend_abs
);
23976 if (encoded_addend
== (unsigned int) FAIL
)
23977 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23978 _("the offset 0x%08lX is not representable"),
23979 (unsigned long) addend_abs
);
23981 /* Extract the instruction. */
23982 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23984 /* If the addend is positive, use an ADD instruction.
23985 Otherwise use a SUB. Take care not to destroy the S bit. */
23986 insn
&= 0xff1fffff;
23992 /* Place the encoded addend into the first 12 bits of the
23994 insn
&= 0xfffff000;
23995 insn
|= encoded_addend
;
23997 /* Update the instruction. */
23998 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24002 case BFD_RELOC_ARM_LDR_PC_G0
:
24003 case BFD_RELOC_ARM_LDR_PC_G1
:
24004 case BFD_RELOC_ARM_LDR_PC_G2
:
24005 case BFD_RELOC_ARM_LDR_SB_G0
:
24006 case BFD_RELOC_ARM_LDR_SB_G1
:
24007 case BFD_RELOC_ARM_LDR_SB_G2
:
24008 gas_assert (!fixP
->fx_done
);
24009 if (!seg
->use_rela_p
)
24012 bfd_vma addend_abs
= abs (value
);
24014 /* Check that the absolute value of the addend can be
24015 encoded in 12 bits. */
24016 if (addend_abs
>= 0x1000)
24017 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24018 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24019 (unsigned long) addend_abs
);
24021 /* Extract the instruction. */
24022 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24024 /* If the addend is negative, clear bit 23 of the instruction.
24025 Otherwise set it. */
24027 insn
&= ~(1 << 23);
24031 /* Place the absolute value of the addend into the first 12 bits
24032 of the instruction. */
24033 insn
&= 0xfffff000;
24034 insn
|= addend_abs
;
24036 /* Update the instruction. */
24037 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24041 case BFD_RELOC_ARM_LDRS_PC_G0
:
24042 case BFD_RELOC_ARM_LDRS_PC_G1
:
24043 case BFD_RELOC_ARM_LDRS_PC_G2
:
24044 case BFD_RELOC_ARM_LDRS_SB_G0
:
24045 case BFD_RELOC_ARM_LDRS_SB_G1
:
24046 case BFD_RELOC_ARM_LDRS_SB_G2
:
24047 gas_assert (!fixP
->fx_done
);
24048 if (!seg
->use_rela_p
)
24051 bfd_vma addend_abs
= abs (value
);
24053 /* Check that the absolute value of the addend can be
24054 encoded in 8 bits. */
24055 if (addend_abs
>= 0x100)
24056 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24057 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24058 (unsigned long) addend_abs
);
24060 /* Extract the instruction. */
24061 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24063 /* If the addend is negative, clear bit 23 of the instruction.
24064 Otherwise set it. */
24066 insn
&= ~(1 << 23);
24070 /* Place the first four bits of the absolute value of the addend
24071 into the first 4 bits of the instruction, and the remaining
24072 four into bits 8 .. 11. */
24073 insn
&= 0xfffff0f0;
24074 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
24076 /* Update the instruction. */
24077 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24081 case BFD_RELOC_ARM_LDC_PC_G0
:
24082 case BFD_RELOC_ARM_LDC_PC_G1
:
24083 case BFD_RELOC_ARM_LDC_PC_G2
:
24084 case BFD_RELOC_ARM_LDC_SB_G0
:
24085 case BFD_RELOC_ARM_LDC_SB_G1
:
24086 case BFD_RELOC_ARM_LDC_SB_G2
:
24087 gas_assert (!fixP
->fx_done
);
24088 if (!seg
->use_rela_p
)
24091 bfd_vma addend_abs
= abs (value
);
24093 /* Check that the absolute value of the addend is a multiple of
24094 four and, when divided by four, fits in 8 bits. */
24095 if (addend_abs
& 0x3)
24096 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24097 _("bad offset 0x%08lX (must be word-aligned)"),
24098 (unsigned long) addend_abs
);
24100 if ((addend_abs
>> 2) > 0xff)
24101 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24102 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24103 (unsigned long) addend_abs
);
24105 /* Extract the instruction. */
24106 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24108 /* If the addend is negative, clear bit 23 of the instruction.
24109 Otherwise set it. */
24111 insn
&= ~(1 << 23);
24115 /* Place the addend (divided by four) into the first eight
24116 bits of the instruction. */
24117 insn
&= 0xfffffff0;
24118 insn
|= addend_abs
>> 2;
24120 /* Update the instruction. */
24121 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24125 case BFD_RELOC_ARM_V4BX
:
24126 /* This will need to go in the object file. */
24130 case BFD_RELOC_UNUSED
:
24132 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24133 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
24137 /* Translate internal representation of relocation info to BFD target
24141 tc_gen_reloc (asection
*section
, fixS
*fixp
)
24144 bfd_reloc_code_real_type code
;
24146 reloc
= XNEW (arelent
);
24148 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
24149 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
24150 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
24152 if (fixp
->fx_pcrel
)
24154 if (section
->use_rela_p
)
24155 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
24157 fixp
->fx_offset
= reloc
->address
;
24159 reloc
->addend
= fixp
->fx_offset
;
24161 switch (fixp
->fx_r_type
)
24164 if (fixp
->fx_pcrel
)
24166 code
= BFD_RELOC_8_PCREL
;
24169 /* Fall through. */
24172 if (fixp
->fx_pcrel
)
24174 code
= BFD_RELOC_16_PCREL
;
24177 /* Fall through. */
24180 if (fixp
->fx_pcrel
)
24182 code
= BFD_RELOC_32_PCREL
;
24185 /* Fall through. */
24187 case BFD_RELOC_ARM_MOVW
:
24188 if (fixp
->fx_pcrel
)
24190 code
= BFD_RELOC_ARM_MOVW_PCREL
;
24193 /* Fall through. */
24195 case BFD_RELOC_ARM_MOVT
:
24196 if (fixp
->fx_pcrel
)
24198 code
= BFD_RELOC_ARM_MOVT_PCREL
;
24201 /* Fall through. */
24203 case BFD_RELOC_ARM_THUMB_MOVW
:
24204 if (fixp
->fx_pcrel
)
24206 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
24209 /* Fall through. */
24211 case BFD_RELOC_ARM_THUMB_MOVT
:
24212 if (fixp
->fx_pcrel
)
24214 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
24217 /* Fall through. */
24219 case BFD_RELOC_NONE
:
24220 case BFD_RELOC_ARM_PCREL_BRANCH
:
24221 case BFD_RELOC_ARM_PCREL_BLX
:
24222 case BFD_RELOC_RVA
:
24223 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
24224 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
24225 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
24226 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24227 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24228 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24229 case BFD_RELOC_VTABLE_ENTRY
:
24230 case BFD_RELOC_VTABLE_INHERIT
:
24232 case BFD_RELOC_32_SECREL
:
24234 code
= fixp
->fx_r_type
;
24237 case BFD_RELOC_THUMB_PCREL_BLX
:
24239 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
24240 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24243 code
= BFD_RELOC_THUMB_PCREL_BLX
;
24246 case BFD_RELOC_ARM_LITERAL
:
24247 case BFD_RELOC_ARM_HWLITERAL
:
24248 /* If this is called then the a literal has
24249 been referenced across a section boundary. */
24250 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24251 _("literal referenced across section boundary"));
24255 case BFD_RELOC_ARM_TLS_CALL
:
24256 case BFD_RELOC_ARM_THM_TLS_CALL
:
24257 case BFD_RELOC_ARM_TLS_DESCSEQ
:
24258 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
24259 case BFD_RELOC_ARM_GOT32
:
24260 case BFD_RELOC_ARM_GOTOFF
:
24261 case BFD_RELOC_ARM_GOT_PREL
:
24262 case BFD_RELOC_ARM_PLT32
:
24263 case BFD_RELOC_ARM_TARGET1
:
24264 case BFD_RELOC_ARM_ROSEGREL32
:
24265 case BFD_RELOC_ARM_SBREL32
:
24266 case BFD_RELOC_ARM_PREL31
:
24267 case BFD_RELOC_ARM_TARGET2
:
24268 case BFD_RELOC_ARM_TLS_LDO32
:
24269 case BFD_RELOC_ARM_PCREL_CALL
:
24270 case BFD_RELOC_ARM_PCREL_JUMP
:
24271 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
24272 case BFD_RELOC_ARM_ALU_PC_G0
:
24273 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
24274 case BFD_RELOC_ARM_ALU_PC_G1
:
24275 case BFD_RELOC_ARM_ALU_PC_G2
:
24276 case BFD_RELOC_ARM_LDR_PC_G0
:
24277 case BFD_RELOC_ARM_LDR_PC_G1
:
24278 case BFD_RELOC_ARM_LDR_PC_G2
:
24279 case BFD_RELOC_ARM_LDRS_PC_G0
:
24280 case BFD_RELOC_ARM_LDRS_PC_G1
:
24281 case BFD_RELOC_ARM_LDRS_PC_G2
:
24282 case BFD_RELOC_ARM_LDC_PC_G0
:
24283 case BFD_RELOC_ARM_LDC_PC_G1
:
24284 case BFD_RELOC_ARM_LDC_PC_G2
:
24285 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
24286 case BFD_RELOC_ARM_ALU_SB_G0
:
24287 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
24288 case BFD_RELOC_ARM_ALU_SB_G1
:
24289 case BFD_RELOC_ARM_ALU_SB_G2
:
24290 case BFD_RELOC_ARM_LDR_SB_G0
:
24291 case BFD_RELOC_ARM_LDR_SB_G1
:
24292 case BFD_RELOC_ARM_LDR_SB_G2
:
24293 case BFD_RELOC_ARM_LDRS_SB_G0
:
24294 case BFD_RELOC_ARM_LDRS_SB_G1
:
24295 case BFD_RELOC_ARM_LDRS_SB_G2
:
24296 case BFD_RELOC_ARM_LDC_SB_G0
:
24297 case BFD_RELOC_ARM_LDC_SB_G1
:
24298 case BFD_RELOC_ARM_LDC_SB_G2
:
24299 case BFD_RELOC_ARM_V4BX
:
24300 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24301 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24302 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24303 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24304 code
= fixp
->fx_r_type
;
24307 case BFD_RELOC_ARM_TLS_GOTDESC
:
24308 case BFD_RELOC_ARM_TLS_GD32
:
24309 case BFD_RELOC_ARM_TLS_LE32
:
24310 case BFD_RELOC_ARM_TLS_IE32
:
24311 case BFD_RELOC_ARM_TLS_LDM32
:
24312 /* BFD will include the symbol's address in the addend.
24313 But we don't want that, so subtract it out again here. */
24314 if (!S_IS_COMMON (fixp
->fx_addsy
))
24315 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
24316 code
= fixp
->fx_r_type
;
24320 case BFD_RELOC_ARM_IMMEDIATE
:
24321 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24322 _("internal relocation (type: IMMEDIATE) not fixed up"));
24325 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
24326 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24327 _("ADRL used for a symbol not defined in the same file"));
24330 case BFD_RELOC_ARM_OFFSET_IMM
:
24331 if (section
->use_rela_p
)
24333 code
= fixp
->fx_r_type
;
24337 if (fixp
->fx_addsy
!= NULL
24338 && !S_IS_DEFINED (fixp
->fx_addsy
)
24339 && S_IS_LOCAL (fixp
->fx_addsy
))
24341 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24342 _("undefined local label `%s'"),
24343 S_GET_NAME (fixp
->fx_addsy
));
24347 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24348 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
24355 switch (fixp
->fx_r_type
)
24357 case BFD_RELOC_NONE
: type
= "NONE"; break;
24358 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
24359 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
24360 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
24361 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
24362 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
24363 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
24364 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
24365 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
24366 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
24367 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
24368 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
24369 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
24370 default: type
= _("<unknown>"); break;
24372 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24373 _("cannot represent %s relocation in this object file format"),
24380 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
24382 && fixp
->fx_addsy
== GOT_symbol
)
24384 code
= BFD_RELOC_ARM_GOTPC
;
24385 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
24389 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
24391 if (reloc
->howto
== NULL
)
24393 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24394 _("cannot represent %s relocation in this object file format"),
24395 bfd_get_reloc_code_name (code
));
24399 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
24400 vtable entry to be used in the relocation's section offset. */
24401 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24402 reloc
->address
= fixp
->fx_offset
;
24407 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24410 cons_fix_new_arm (fragS
* frag
,
24414 bfd_reloc_code_real_type reloc
)
24419 FIXME: @@ Should look at CPU word size. */
24423 reloc
= BFD_RELOC_8
;
24426 reloc
= BFD_RELOC_16
;
24430 reloc
= BFD_RELOC_32
;
24433 reloc
= BFD_RELOC_64
;
24438 if (exp
->X_op
== O_secrel
)
24440 exp
->X_op
= O_symbol
;
24441 reloc
= BFD_RELOC_32_SECREL
;
24445 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
24448 #if defined (OBJ_COFF)
24450 arm_validate_fix (fixS
* fixP
)
24452 /* If the destination of the branch is a defined symbol which does not have
24453 the THUMB_FUNC attribute, then we must be calling a function which has
24454 the (interfacearm) attribute. We look for the Thumb entry point to that
24455 function and change the branch to refer to that function instead. */
24456 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
24457 && fixP
->fx_addsy
!= NULL
24458 && S_IS_DEFINED (fixP
->fx_addsy
)
24459 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
24461 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
24468 arm_force_relocation (struct fix
* fixp
)
24470 #if defined (OBJ_COFF) && defined (TE_PE)
24471 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
24475 /* In case we have a call or a branch to a function in ARM ISA mode from
24476 a thumb function or vice-versa force the relocation. These relocations
24477 are cleared off for some cores that might have blx and simple transformations
24481 switch (fixp
->fx_r_type
)
24483 case BFD_RELOC_ARM_PCREL_JUMP
:
24484 case BFD_RELOC_ARM_PCREL_CALL
:
24485 case BFD_RELOC_THUMB_PCREL_BLX
:
24486 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
24490 case BFD_RELOC_ARM_PCREL_BLX
:
24491 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24492 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24493 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24494 if (ARM_IS_FUNC (fixp
->fx_addsy
))
24503 /* Resolve these relocations even if the symbol is extern or weak.
24504 Technically this is probably wrong due to symbol preemption.
24505 In practice these relocations do not have enough range to be useful
24506 at dynamic link time, and some code (e.g. in the Linux kernel)
24507 expects these references to be resolved. */
24508 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
24509 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
24510 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
24511 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
24512 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24513 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
24514 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
24515 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
24516 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24517 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
24518 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
24519 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
24520 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
24521 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
24524 /* Always leave these relocations for the linker. */
24525 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24526 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24527 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24530 /* Always generate relocations against function symbols. */
24531 if (fixp
->fx_r_type
== BFD_RELOC_32
24533 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
24536 return generic_force_reloc (fixp
);
24539 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24540 /* Relocations against function names must be left unadjusted,
24541 so that the linker can use this information to generate interworking
24542 stubs. The MIPS version of this function
24543 also prevents relocations that are mips-16 specific, but I do not
24544 know why it does this.
24547 There is one other problem that ought to be addressed here, but
24548 which currently is not: Taking the address of a label (rather
24549 than a function) and then later jumping to that address. Such
24550 addresses also ought to have their bottom bit set (assuming that
24551 they reside in Thumb code), but at the moment they will not. */
24554 arm_fix_adjustable (fixS
* fixP
)
24556 if (fixP
->fx_addsy
== NULL
)
24559 /* Preserve relocations against symbols with function type. */
24560 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
24563 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
24564 && fixP
->fx_subsy
== NULL
)
24567 /* We need the symbol name for the VTABLE entries. */
24568 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
24569 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24572 /* Don't allow symbols to be discarded on GOT related relocs. */
24573 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
24574 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
24575 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
24576 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
24577 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
24578 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
24579 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
24580 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
24581 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
24582 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
24583 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
24584 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
24585 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
24586 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
24589 /* Similarly for group relocations. */
24590 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24591 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24592 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24595 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24596 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
24597 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24598 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
24599 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
24600 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24601 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
24602 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
24603 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
24606 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24607 offsets, so keep these symbols. */
24608 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24609 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
24614 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24618 elf32_arm_target_format (void)
24621 return (target_big_endian
24622 ? "elf32-bigarm-symbian"
24623 : "elf32-littlearm-symbian");
24624 #elif defined (TE_VXWORKS)
24625 return (target_big_endian
24626 ? "elf32-bigarm-vxworks"
24627 : "elf32-littlearm-vxworks");
24628 #elif defined (TE_NACL)
24629 return (target_big_endian
24630 ? "elf32-bigarm-nacl"
24631 : "elf32-littlearm-nacl");
24633 if (target_big_endian
)
24634 return "elf32-bigarm";
24636 return "elf32-littlearm";
24641 armelf_frob_symbol (symbolS
* symp
,
24644 elf_frob_symbol (symp
, puntp
);
24648 /* MD interface: Finalization. */
24653 literal_pool
* pool
;
24655 /* Ensure that all the IT blocks are properly closed. */
24656 check_it_blocks_finished ();
24658 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
24660 /* Put it at the end of the relevant section. */
24661 subseg_set (pool
->section
, pool
->sub_section
);
24663 arm_elf_change_section ();
24670 /* Remove any excess mapping symbols generated for alignment frags in
24671 SEC. We may have created a mapping symbol before a zero byte
24672 alignment; remove it if there's a mapping symbol after the
24675 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
24676 void *dummy ATTRIBUTE_UNUSED
)
24678 segment_info_type
*seginfo
= seg_info (sec
);
24681 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
24684 for (fragp
= seginfo
->frchainP
->frch_root
;
24686 fragp
= fragp
->fr_next
)
24688 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
24689 fragS
*next
= fragp
->fr_next
;
24691 /* Variable-sized frags have been converted to fixed size by
24692 this point. But if this was variable-sized to start with,
24693 there will be a fixed-size frag after it. So don't handle
24695 if (sym
== NULL
|| next
== NULL
)
24698 if (S_GET_VALUE (sym
) < next
->fr_address
)
24699 /* Not at the end of this frag. */
24701 know (S_GET_VALUE (sym
) == next
->fr_address
);
24705 if (next
->tc_frag_data
.first_map
!= NULL
)
24707 /* Next frag starts with a mapping symbol. Discard this
24709 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24713 if (next
->fr_next
== NULL
)
24715 /* This mapping symbol is at the end of the section. Discard
24717 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
24718 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24722 /* As long as we have empty frags without any mapping symbols,
24724 /* If the next frag is non-empty and does not start with a
24725 mapping symbol, then this mapping symbol is required. */
24726 if (next
->fr_address
!= next
->fr_next
->fr_address
)
24729 next
= next
->fr_next
;
24731 while (next
!= NULL
);
24736 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24740 arm_adjust_symtab (void)
24745 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24747 if (ARM_IS_THUMB (sym
))
24749 if (THUMB_IS_FUNC (sym
))
24751 /* Mark the symbol as a Thumb function. */
24752 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
24753 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
24754 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
24756 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
24757 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
24759 as_bad (_("%s: unexpected function type: %d"),
24760 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
24762 else switch (S_GET_STORAGE_CLASS (sym
))
24765 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
24768 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
24771 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
24779 if (ARM_IS_INTERWORK (sym
))
24780 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
24787 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24789 if (ARM_IS_THUMB (sym
))
24791 elf_symbol_type
* elf_sym
;
24793 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
24794 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
24796 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
24797 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
24799 /* If it's a .thumb_func, declare it as so,
24800 otherwise tag label as .code 16. */
24801 if (THUMB_IS_FUNC (sym
))
24802 ARM_SET_SYM_BRANCH_TYPE (elf_sym
->internal_elf_sym
.st_target_internal
,
24803 ST_BRANCH_TO_THUMB
);
24804 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
24805 elf_sym
->internal_elf_sym
.st_info
=
24806 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
24811 /* Remove any overlapping mapping symbols generated by alignment frags. */
24812 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
24813 /* Now do generic ELF adjustments. */
24814 elf_adjust_symtab ();
24818 /* MD interface: Initialization. */
24821 set_constant_flonums (void)
24825 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
24826 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
24830 /* Auto-select Thumb mode if it's the only available instruction set for the
24831 given architecture. */
24834 autoselect_thumb_from_cpu_variant (void)
24836 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
24837 opcode_select (16);
24846 if ( (arm_ops_hsh
= hash_new ()) == NULL
24847 || (arm_cond_hsh
= hash_new ()) == NULL
24848 || (arm_shift_hsh
= hash_new ()) == NULL
24849 || (arm_psr_hsh
= hash_new ()) == NULL
24850 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
24851 || (arm_reg_hsh
= hash_new ()) == NULL
24852 || (arm_reloc_hsh
= hash_new ()) == NULL
24853 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
24854 as_fatal (_("virtual memory exhausted"));
24856 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
24857 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
24858 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
24859 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
24860 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
24861 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
24862 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
24863 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
24864 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
24865 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
24866 (void *) (v7m_psrs
+ i
));
24867 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
24868 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
24870 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
24872 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
24873 (void *) (barrier_opt_names
+ i
));
24875 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
24877 struct reloc_entry
* entry
= reloc_names
+ i
;
24879 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
24880 /* This makes encode_branch() use the EABI versions of this relocation. */
24881 entry
->reloc
= BFD_RELOC_UNUSED
;
24883 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
24887 set_constant_flonums ();
24889 /* Set the cpu variant based on the command-line options. We prefer
24890 -mcpu= over -march= if both are set (as for GCC); and we prefer
24891 -mfpu= over any other way of setting the floating point unit.
24892 Use of legacy options with new options are faulted. */
24895 if (mcpu_cpu_opt
|| march_cpu_opt
)
24896 as_bad (_("use of old and new-style options to set CPU type"));
24898 mcpu_cpu_opt
= legacy_cpu
;
24900 else if (!mcpu_cpu_opt
)
24901 mcpu_cpu_opt
= march_cpu_opt
;
24906 as_bad (_("use of old and new-style options to set FPU type"));
24908 mfpu_opt
= legacy_fpu
;
24910 else if (!mfpu_opt
)
24912 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24913 || defined (TE_NetBSD) || defined (TE_VXWORKS))
24914 /* Some environments specify a default FPU. If they don't, infer it
24915 from the processor. */
24917 mfpu_opt
= mcpu_fpu_opt
;
24919 mfpu_opt
= march_fpu_opt
;
24921 mfpu_opt
= &fpu_default
;
24927 if (mcpu_cpu_opt
!= NULL
)
24928 mfpu_opt
= &fpu_default
;
24929 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
24930 mfpu_opt
= &fpu_arch_vfp_v2
;
24932 mfpu_opt
= &fpu_arch_fpa
;
24938 mcpu_cpu_opt
= &cpu_default
;
24939 selected_cpu
= cpu_default
;
24941 else if (no_cpu_selected ())
24942 selected_cpu
= cpu_default
;
24945 selected_cpu
= *mcpu_cpu_opt
;
24947 mcpu_cpu_opt
= &arm_arch_any
;
24950 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
24952 autoselect_thumb_from_cpu_variant ();
24954 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
24956 #if defined OBJ_COFF || defined OBJ_ELF
24958 unsigned int flags
= 0;
24960 #if defined OBJ_ELF
24961 flags
= meabi_flags
;
24963 switch (meabi_flags
)
24965 case EF_ARM_EABI_UNKNOWN
:
24967 /* Set the flags in the private structure. */
24968 if (uses_apcs_26
) flags
|= F_APCS26
;
24969 if (support_interwork
) flags
|= F_INTERWORK
;
24970 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
24971 if (pic_code
) flags
|= F_PIC
;
24972 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
24973 flags
|= F_SOFT_FLOAT
;
24975 switch (mfloat_abi_opt
)
24977 case ARM_FLOAT_ABI_SOFT
:
24978 case ARM_FLOAT_ABI_SOFTFP
:
24979 flags
|= F_SOFT_FLOAT
;
24982 case ARM_FLOAT_ABI_HARD
:
24983 if (flags
& F_SOFT_FLOAT
)
24984 as_bad (_("hard-float conflicts with specified fpu"));
24988 /* Using pure-endian doubles (even if soft-float). */
24989 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
24990 flags
|= F_VFP_FLOAT
;
24992 #if defined OBJ_ELF
24993 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
24994 flags
|= EF_ARM_MAVERICK_FLOAT
;
24997 case EF_ARM_EABI_VER4
:
24998 case EF_ARM_EABI_VER5
:
24999 /* No additional flags to set. */
25006 bfd_set_private_flags (stdoutput
, flags
);
25008 /* We have run out flags in the COFF header to encode the
25009 status of ATPCS support, so instead we create a dummy,
25010 empty, debug section called .arm.atpcs. */
25015 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
25019 bfd_set_section_flags
25020 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
25021 bfd_set_section_size (stdoutput
, sec
, 0);
25022 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
25028 /* Record the CPU type as well. */
25029 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
25030 mach
= bfd_mach_arm_iWMMXt2
;
25031 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
25032 mach
= bfd_mach_arm_iWMMXt
;
25033 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
25034 mach
= bfd_mach_arm_XScale
;
25035 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
25036 mach
= bfd_mach_arm_ep9312
;
25037 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
25038 mach
= bfd_mach_arm_5TE
;
25039 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
25041 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
25042 mach
= bfd_mach_arm_5T
;
25044 mach
= bfd_mach_arm_5
;
25046 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
25048 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
25049 mach
= bfd_mach_arm_4T
;
25051 mach
= bfd_mach_arm_4
;
25053 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
25054 mach
= bfd_mach_arm_3M
;
25055 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
25056 mach
= bfd_mach_arm_3
;
25057 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
25058 mach
= bfd_mach_arm_2a
;
25059 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
25060 mach
= bfd_mach_arm_2
;
25062 mach
= bfd_mach_arm_unknown
;
25064 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
25067 /* Command line processing. */
25070 Invocation line includes a switch not recognized by the base assembler.
25071 See if it's a processor-specific option.
25073 This routine is somewhat complicated by the need for backwards
25074 compatibility (since older releases of gcc can't be changed).
25075 The new options try to make the interface as compatible as
25078 New options (supported) are:
25080 -mcpu=<cpu name> Assemble for selected processor
25081 -march=<architecture name> Assemble for selected architecture
25082 -mfpu=<fpu architecture> Assemble for selected FPU.
25083 -EB/-mbig-endian Big-endian
25084 -EL/-mlittle-endian Little-endian
25085 -k Generate PIC code
25086 -mthumb Start in Thumb mode
25087 -mthumb-interwork Code supports ARM/Thumb interworking
25089 -m[no-]warn-deprecated Warn about deprecated features
25090 -m[no-]warn-syms Warn when symbols match instructions
25092 For now we will also provide support for:
25094 -mapcs-32 32-bit Program counter
25095 -mapcs-26 26-bit Program counter
25096 -mapcs-float Floats passed in FP registers
25097 -mapcs-reentrant Reentrant code
25099 (sometime these will probably be replaced with -mapcs=<list of options>
25100 and -matpcs=<list of options>)
25102 The remaining options are only supported for back-wards compatibility.
25103 Cpu variants, the arm part is optional:
25104 -m[arm]1 Currently not supported.
25105 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25106 -m[arm]3 Arm 3 processor
25107 -m[arm]6[xx], Arm 6 processors
25108 -m[arm]7[xx][t][[d]m] Arm 7 processors
25109 -m[arm]8[10] Arm 8 processors
25110 -m[arm]9[20][tdmi] Arm 9 processors
25111 -mstrongarm[110[0]] StrongARM processors
25112 -mxscale XScale processors
25113 -m[arm]v[2345[t[e]]] Arm architectures
25114 -mall All (except the ARM1)
25116 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25117 -mfpe-old (No float load/store multiples)
25118 -mvfpxd VFP Single precision
25120 -mno-fpu Disable all floating point instructions
25122 The following CPU names are recognized:
25123 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25124 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25125 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25126 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25127 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25128 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25129 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25133 const char * md_shortopts
= "m:k";
25135 #ifdef ARM_BI_ENDIAN
25136 #define OPTION_EB (OPTION_MD_BASE + 0)
25137 #define OPTION_EL (OPTION_MD_BASE + 1)
25139 #if TARGET_BYTES_BIG_ENDIAN
25140 #define OPTION_EB (OPTION_MD_BASE + 0)
25142 #define OPTION_EL (OPTION_MD_BASE + 1)
25145 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25147 struct option md_longopts
[] =
25150 {"EB", no_argument
, NULL
, OPTION_EB
},
25153 {"EL", no_argument
, NULL
, OPTION_EL
},
25155 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
25156 {NULL
, no_argument
, NULL
, 0}
25160 size_t md_longopts_size
= sizeof (md_longopts
);
/* Simple boolean command-line options: each entry flips an int
   variable to a fixed value when its option name is seen.  */
struct arm_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
25171 struct arm_option_table arm_opts
[] =
25173 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
25174 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
25175 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25176 &support_interwork
, 1, NULL
},
25177 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
25178 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
25179 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
25181 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
25182 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
25183 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
25184 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
25187 /* These are recognized by the assembler, but have no affect on code. */
25188 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
25189 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
25191 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
25192 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25193 &warn_on_deprecated
, 0, NULL
},
25194 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
25195 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
25196 {NULL
, NULL
, NULL
, 0, NULL
}
25199 struct arm_legacy_option_table
25201 const char *option
; /* Option name to match. */
25202 const arm_feature_set
**var
; /* Variable to change. */
25203 const arm_feature_set value
; /* What to change it to. */
25204 const char *deprecated
; /* If non-null, print this message. */
25207 const struct arm_legacy_option_table arm_legacy_opts
[] =
25209 /* DON'T add any new processors to this list -- we want the whole list
25210 to go away... Add them to the processors table instead. */
25211 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
25212 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
25213 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
25214 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
25215 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
25216 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
25217 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
25218 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
25219 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
25220 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
25221 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
25222 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
25223 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
25224 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
25225 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
25226 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
25227 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
25228 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
25229 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
25230 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
25231 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
25232 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
25233 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
25234 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
25235 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
25236 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
25237 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
25238 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
25239 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
25240 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
25241 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
25242 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
25243 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
25244 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
25245 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
25246 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
25247 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
25248 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
25249 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
25250 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
25251 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
25252 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
25253 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
25254 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
25255 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25256 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25257 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25258 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25259 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25260 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25261 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25262 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25263 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25264 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25265 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25266 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25267 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25268 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25269 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25270 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25271 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25272 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25273 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25274 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25275 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25276 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25277 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25278 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25279 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
25280 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
25281 N_("use -mcpu=strongarm110")},
25282 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
25283 N_("use -mcpu=strongarm1100")},
25284 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
25285 N_("use -mcpu=strongarm1110")},
25286 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
25287 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
25288 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
25290 /* Architecture variants -- don't add any more to this list either. */
25291 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25292 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25293 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25294 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25295 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25296 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25297 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25298 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25299 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25300 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25301 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25302 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25303 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25304 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25305 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25306 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25307 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25308 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25310 /* Floating point variants -- don't add any more to this list either. */
25311 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
25312 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
25313 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
25314 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
25315 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
25317 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
25320 struct arm_cpu_option_table
25324 const arm_feature_set value
;
25325 /* For some CPUs we assume an FPU unless the user explicitly sets
25327 const arm_feature_set default_fpu
;
25328 /* The canonical name of the CPU, or NULL to use NAME converted to upper
25330 const char *canonical_name
;
25333 /* This list should, at a minimum, contain all the cpu names
25334 recognized by GCC. */
25335 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
25336 static const struct arm_cpu_option_table arm_cpus
[] =
25338 ARM_CPU_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
, NULL
),
25339 ARM_CPU_OPT ("arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
),
25340 ARM_CPU_OPT ("arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
),
25341 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
25342 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
25343 ARM_CPU_OPT ("arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25344 ARM_CPU_OPT ("arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25345 ARM_CPU_OPT ("arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25346 ARM_CPU_OPT ("arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25347 ARM_CPU_OPT ("arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25348 ARM_CPU_OPT ("arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25349 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25350 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25351 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25352 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25353 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25354 ARM_CPU_OPT ("arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25355 ARM_CPU_OPT ("arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25356 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25357 ARM_CPU_OPT ("arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25358 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25359 ARM_CPU_OPT ("arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25360 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25361 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25362 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25363 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25364 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25365 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25366 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25367 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25368 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25369 ARM_CPU_OPT ("arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25370 ARM_CPU_OPT ("arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25371 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25372 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25373 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25374 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25375 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25376 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25377 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"),
25378 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25379 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25380 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25381 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25382 ARM_CPU_OPT ("fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25383 ARM_CPU_OPT ("fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25384 /* For V5 or later processors we default to using VFP; but the user
25385 should really set the FPU type explicitly. */
25386 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25387 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25388 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
25389 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
25390 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
25391 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25392 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"),
25393 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25394 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25395 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"),
25396 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25397 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25398 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25399 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25400 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25401 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"),
25402 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25403 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25404 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25405 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
,
25407 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
25408 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25409 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25410 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25411 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25412 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25413 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"),
25414 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
),
25415 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
,
25417 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
),
25418 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, "MPCore"),
25419 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, "MPCore"),
25420 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
),
25421 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
),
25422 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6KZ
, FPU_NONE
, NULL
),
25423 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6KZ
, FPU_ARCH_VFP_V2
, NULL
),
25424 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC
,
25425 FPU_NONE
, "Cortex-A5"),
25426 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25428 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC
,
25429 ARM_FEATURE_COPROC (FPU_VFP_V3
25430 | FPU_NEON_EXT_V1
),
25432 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC
,
25433 ARM_FEATURE_COPROC (FPU_VFP_V3
25434 | FPU_NEON_EXT_V1
),
25436 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25438 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25440 ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25442 ARM_CPU_OPT ("cortex-a32", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25444 ARM_CPU_OPT ("cortex-a35", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25446 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25448 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25450 ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25452 ARM_CPU_OPT ("cortex-a73", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25454 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, "Cortex-R4"),
25455 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R
, FPU_ARCH_VFP_V3D16
,
25457 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV
,
25458 FPU_NONE
, "Cortex-R5"),
25459 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV
,
25460 FPU_ARCH_VFP_V3D16
,
25462 ARM_CPU_OPT ("cortex-r8", ARM_ARCH_V7R_IDIV
,
25463 FPU_ARCH_VFP_V3D16
,
25465 ARM_CPU_OPT ("cortex-m33", ARM_ARCH_V8M_MAIN_DSP
,
25466 FPU_NONE
, "Cortex-M33"),
25467 ARM_CPU_OPT ("cortex-m23", ARM_ARCH_V8M_BASE
,
25468 FPU_NONE
, "Cortex-M23"),
25469 ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M7"),
25470 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M4"),
25471 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, "Cortex-M3"),
25472 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M1"),
25473 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0"),
25474 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0+"),
25475 ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25478 ARM_CPU_OPT ("falkor", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25481 ARM_CPU_OPT ("qdf24xx", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25485 /* ??? XSCALE is really an architecture. */
25486 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
25487 /* ??? iwmmxt is not a processor. */
25488 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
),
25489 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
),
25490 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
25492 ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
25493 FPU_ARCH_MAVERICK
, "ARM920T"),
25494 /* Marvell processors. */
25495 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
25497 ARM_EXT2_V6T2_V8M
),
25498 FPU_ARCH_VFP_V3D16
, NULL
),
25499 ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
25501 ARM_EXT2_V6T2_V8M
),
25502 FPU_ARCH_NEON_VFP_V4
, NULL
),
25503 /* APM X-Gene family. */
25504 ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25506 ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25509 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
25513 struct arm_arch_option_table
25517 const arm_feature_set value
;
25518 const arm_feature_set default_fpu
;
25521 /* This list should, at a minimum, contain all the architecture names
25522 recognized by GCC. */
25523 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
25524 static const struct arm_arch_option_table arm_archs
[] =
25526 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
25527 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
25528 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
25529 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25530 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25531 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
25532 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
25533 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
25534 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
25535 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
25536 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
25537 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
25538 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
25539 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
25540 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
),
25541 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
),
25542 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
),
25543 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25544 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25545 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
),
25546 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
),
25547 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
25548 kept to preserve existing behaviour. */
25549 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25550 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25551 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
),
25552 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
),
25553 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
),
25554 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
25555 kept to preserve existing behaviour. */
25556 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25557 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25558 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
25559 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
25560 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
),
25561 /* The official spelling of the ARMv7 profile variants is the dashed form.
25562 Accept the non-dashed form for compatibility with old toolchains. */
25563 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25564 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
),
25565 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25566 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25567 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25568 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25569 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25570 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
),
25571 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
25572 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
),
25573 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
),
25574 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
),
25575 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
),
25576 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
25577 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
25578 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
),
25579 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25581 #undef ARM_ARCH_OPT
25583 /* ISA extensions in the co-processor and main instruction set space. */
25584 struct arm_option_extension_value_table
25588 const arm_feature_set merge_value
;
25589 const arm_feature_set clear_value
;
25590 /* List of architectures for which an extension is available. ARM_ARCH_NONE
25591 indicates that an extension is available for all architectures while
25592 ARM_ANY marks an empty entry. */
25593 const arm_feature_set allowed_archs
[2];
25596 /* The following table must be in alphabetical order with a NULL last entry.
25598 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
25599 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
25600 static const struct arm_option_extension_value_table arm_extensions
[] =
25602 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25603 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25604 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25605 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
25606 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25607 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
25608 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
25609 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
25610 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
25611 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25612 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25613 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25615 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25616 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25617 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
25618 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
25619 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
25620 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
25621 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
25622 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
25623 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
25624 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
25625 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25626 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25627 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
25628 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
25629 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25630 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25631 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
25632 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
25633 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
25634 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25635 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
25636 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
25637 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25638 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
25639 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
25640 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25641 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25642 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25643 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
25644 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25645 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
25646 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
25647 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25648 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
25650 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
25651 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25652 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
25653 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
25654 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
25658 /* ISA floating-point and Advanced SIMD extensions. */
25659 struct arm_option_fpu_value_table
25662 const arm_feature_set value
;
25665 /* This list should, at a minimum, contain all the fpu names
25666 recognized by GCC. */
25667 static const struct arm_option_fpu_value_table arm_fpus
[] =
25669 {"softfpa", FPU_NONE
},
25670 {"fpe", FPU_ARCH_FPE
},
25671 {"fpe2", FPU_ARCH_FPE
},
25672 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
25673 {"fpa", FPU_ARCH_FPA
},
25674 {"fpa10", FPU_ARCH_FPA
},
25675 {"fpa11", FPU_ARCH_FPA
},
25676 {"arm7500fe", FPU_ARCH_FPA
},
25677 {"softvfp", FPU_ARCH_VFP
},
25678 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
25679 {"vfp", FPU_ARCH_VFP_V2
},
25680 {"vfp9", FPU_ARCH_VFP_V2
},
25681 {"vfp3", FPU_ARCH_VFP_V3
}, /* For backwards compatbility. */
25682 {"vfp10", FPU_ARCH_VFP_V2
},
25683 {"vfp10-r0", FPU_ARCH_VFP_V1
},
25684 {"vfpxd", FPU_ARCH_VFP_V1xD
},
25685 {"vfpv2", FPU_ARCH_VFP_V2
},
25686 {"vfpv3", FPU_ARCH_VFP_V3
},
25687 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
25688 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
25689 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
25690 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
25691 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
25692 {"arm1020t", FPU_ARCH_VFP_V1
},
25693 {"arm1020e", FPU_ARCH_VFP_V2
},
25694 {"arm1136jfs", FPU_ARCH_VFP_V2
},
25695 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
25696 {"maverick", FPU_ARCH_MAVERICK
},
25697 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
25698 {"neon-fp16", FPU_ARCH_NEON_FP16
},
25699 {"vfpv4", FPU_ARCH_VFP_V4
},
25700 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
25701 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
25702 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
25703 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
25704 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
25705 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
25706 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
25707 {"crypto-neon-fp-armv8",
25708 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
25709 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
25710 {"crypto-neon-fp-armv8.1",
25711 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
25712 {NULL
, ARM_ARCH_NONE
}
/* Generic name -> integer-value option table; used for the float ABI and
   EABI version tables below.  */
struct arm_option_value_table
{
  const char *name;   /* Option name to match.  */
  long	      value;  /* Value selected when the name matches.  */
};
25721 static const struct arm_option_value_table arm_float_abis
[] =
25723 {"hard", ARM_FLOAT_ABI_HARD
},
25724 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
25725 {"soft", ARM_FLOAT_ABI_SOFT
},
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
/* Table entry for a multi-character option (e.g. "mcpu=") that takes a
   sub-option argument parsed by a callback.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
25749 arm_parse_extension (const char *str
, const arm_feature_set
**opt_p
)
25751 arm_feature_set
*ext_set
= XNEW (arm_feature_set
);
25753 /* We insist on extensions being specified in alphabetical order, and with
25754 extensions being added before being removed. We achieve this by having
25755 the global ARM_EXTENSIONS table in alphabetical order, and using the
25756 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25757 or removing it (0) and only allowing it to change in the order
25759 const struct arm_option_extension_value_table
* opt
= NULL
;
25760 const arm_feature_set arm_any
= ARM_ANY
;
25761 int adding_value
= -1;
25763 /* Copy the feature set, so that we can modify it. */
25764 *ext_set
= **opt_p
;
25767 while (str
!= NULL
&& *str
!= 0)
25774 as_bad (_("invalid architectural extension"));
25779 ext
= strchr (str
, '+');
25784 len
= strlen (str
);
25786 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
25788 if (adding_value
!= 0)
25791 opt
= arm_extensions
;
25799 if (adding_value
== -1)
25802 opt
= arm_extensions
;
25804 else if (adding_value
!= 1)
25806 as_bad (_("must specify extensions to add before specifying "
25807 "those to remove"));
25814 as_bad (_("missing architectural extension"));
25818 gas_assert (adding_value
!= -1);
25819 gas_assert (opt
!= NULL
);
25821 /* Scan over the options table trying to find an exact match. */
25822 for (; opt
->name
!= NULL
; opt
++)
25823 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25825 int i
, nb_allowed_archs
=
25826 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
25827 /* Check we can apply the extension to this architecture. */
25828 for (i
= 0; i
< nb_allowed_archs
; i
++)
25831 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
25833 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *ext_set
))
25836 if (i
== nb_allowed_archs
)
25838 as_bad (_("extension does not apply to the base architecture"));
25842 /* Add or remove the extension. */
25844 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
25846 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
25851 if (opt
->name
== NULL
)
25853 /* Did we fail to find an extension because it wasn't specified in
25854 alphabetical order, or because it does not exist? */
25856 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
25857 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25860 if (opt
->name
== NULL
)
25861 as_bad (_("unknown architectural extension `%s'"), str
);
25863 as_bad (_("architectural extensions must be specified in "
25864 "alphabetical order"));
25870 /* We should skip the extension we've just matched the next time
25882 arm_parse_cpu (const char *str
)
25884 const struct arm_cpu_option_table
*opt
;
25885 const char *ext
= strchr (str
, '+');
25891 len
= strlen (str
);
25895 as_bad (_("missing cpu name `%s'"), str
);
25899 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
25900 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25902 mcpu_cpu_opt
= &opt
->value
;
25903 mcpu_fpu_opt
= &opt
->default_fpu
;
25904 if (opt
->canonical_name
)
25906 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
25907 strcpy (selected_cpu_name
, opt
->canonical_name
);
25913 if (len
>= sizeof selected_cpu_name
)
25914 len
= (sizeof selected_cpu_name
) - 1;
25916 for (i
= 0; i
< len
; i
++)
25917 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25918 selected_cpu_name
[i
] = 0;
25922 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
25927 as_bad (_("unknown cpu `%s'"), str
);
25932 arm_parse_arch (const char *str
)
25934 const struct arm_arch_option_table
*opt
;
25935 const char *ext
= strchr (str
, '+');
25941 len
= strlen (str
);
25945 as_bad (_("missing architecture name `%s'"), str
);
25949 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
25950 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25952 march_cpu_opt
= &opt
->value
;
25953 march_fpu_opt
= &opt
->default_fpu
;
25954 strcpy (selected_cpu_name
, opt
->name
);
25957 return arm_parse_extension (ext
, &march_cpu_opt
);
25962 as_bad (_("unknown architecture `%s'\n"), str
);
25967 arm_parse_fpu (const char * str
)
25969 const struct arm_option_fpu_value_table
* opt
;
25971 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
25972 if (streq (opt
->name
, str
))
25974 mfpu_opt
= &opt
->value
;
25978 as_bad (_("unknown floating point format `%s'\n"), str
);
25983 arm_parse_float_abi (const char * str
)
25985 const struct arm_option_value_table
* opt
;
25987 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
25988 if (streq (opt
->name
, str
))
25990 mfloat_abi_opt
= opt
->value
;
25994 as_bad (_("unknown floating point abi `%s'\n"), str
);
#ifdef OBJ_ELF
/* Handle -meabi=<ver>: record the requested EABI version flags.
   Returns TRUE on success.  */
static bfd_boolean
arm_parse_eabi (const char * str)
{
  const struct arm_option_value_table *opt;

  for (opt = arm_eabis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	meabi_flags = opt->value;
	return TRUE;
      }
  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
26016 arm_parse_it_mode (const char * str
)
26018 bfd_boolean ret
= TRUE
;
26020 if (streq ("arm", str
))
26021 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
26022 else if (streq ("thumb", str
))
26023 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
26024 else if (streq ("always", str
))
26025 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
26026 else if (streq ("never", str
))
26027 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
26030 as_bad (_("unknown implicit IT mode `%s', should be "\
26031 "arm, thumb, always, or never."), str
);
26039 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
26041 codecomposer_syntax
= TRUE
;
26042 arm_comment_chars
[0] = ';';
26043 arm_line_separator_chars
[0] = 0;
26047 struct arm_long_option_table arm_long_opts
[] =
26049 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
26050 arm_parse_cpu
, NULL
},
26051 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
26052 arm_parse_arch
, NULL
},
26053 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
26054 arm_parse_fpu
, NULL
},
26055 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
26056 arm_parse_float_abi
, NULL
},
26058 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
26059 arm_parse_eabi
, NULL
},
26061 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
26062 arm_parse_it_mode
, NULL
},
26063 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
26064 arm_ccs_mode
, NULL
},
26065 {NULL
, NULL
, 0, NULL
}
26069 md_parse_option (int c
, const char * arg
)
26071 struct arm_option_table
*opt
;
26072 const struct arm_legacy_option_table
*fopt
;
26073 struct arm_long_option_table
*lopt
;
26079 target_big_endian
= 1;
26085 target_big_endian
= 0;
26089 case OPTION_FIX_V4BX
:
26094 /* Listing option. Just ignore these, we don't support additional
26099 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
26101 if (c
== opt
->option
[0]
26102 && ((arg
== NULL
&& opt
->option
[1] == 0)
26103 || streq (arg
, opt
->option
+ 1)))
26105 /* If the option is deprecated, tell the user. */
26106 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
26107 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
26108 arg
? arg
: "", _(opt
->deprecated
));
26110 if (opt
->var
!= NULL
)
26111 *opt
->var
= opt
->value
;
26117 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
26119 if (c
== fopt
->option
[0]
26120 && ((arg
== NULL
&& fopt
->option
[1] == 0)
26121 || streq (arg
, fopt
->option
+ 1)))
26123 /* If the option is deprecated, tell the user. */
26124 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
26125 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
26126 arg
? arg
: "", _(fopt
->deprecated
));
26128 if (fopt
->var
!= NULL
)
26129 *fopt
->var
= &fopt
->value
;
26135 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
26137 /* These options are expected to have an argument. */
26138 if (c
== lopt
->option
[0]
26140 && strncmp (arg
, lopt
->option
+ 1,
26141 strlen (lopt
->option
+ 1)) == 0)
26143 /* If the option is deprecated, tell the user. */
26144 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
26145 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
26146 _(lopt
->deprecated
));
26148 /* Call the sup-option parser. */
26149 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
26160 md_show_usage (FILE * fp
)
26162 struct arm_option_table
*opt
;
26163 struct arm_long_option_table
*lopt
;
26165 fprintf (fp
, _(" ARM-specific assembler options:\n"));
26167 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
26168 if (opt
->help
!= NULL
)
26169 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
26171 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
26172 if (lopt
->help
!= NULL
)
26173 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
26177 -EB assemble code for a big-endian cpu\n"));
26182 -EL assemble code for a little-endian cpu\n"));
26186 --fix-v4bx Allow BX in ARMv4 code\n"));
26194 arm_feature_set flags
;
26195 } cpu_arch_ver_table
;
26197 /* Mapping from CPU features to EABI CPU arch values. As a general rule, table
26198 must be sorted least features first but some reordering is needed, eg. for
26199 Thumb-2 instructions to be detected as coming from ARMv6T2. */
26200 static const cpu_arch_ver_table cpu_arch_ver
[] =
26206 {4, ARM_ARCH_V5TE
},
26207 {5, ARM_ARCH_V5TEJ
},
26211 {11, ARM_ARCH_V6M
},
26212 {12, ARM_ARCH_V6SM
},
26213 {8, ARM_ARCH_V6T2
},
26214 {10, ARM_ARCH_V7VE
},
26215 {10, ARM_ARCH_V7R
},
26216 {10, ARM_ARCH_V7M
},
26217 {14, ARM_ARCH_V8A
},
26218 {16, ARM_ARCH_V8M_BASE
},
26219 {17, ARM_ARCH_V8M_MAIN
},
26223 /* Set an attribute if it has not already been set by the user. */
26225 aeabi_set_attribute_int (int tag
, int value
)
26228 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
26229 || !attributes_set_explicitly
[tag
])
26230 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
26234 aeabi_set_attribute_string (int tag
, const char *value
)
26237 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
26238 || !attributes_set_explicitly
[tag
])
26239 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
26242 /* Set the public EABI object attributes. */
26244 aeabi_set_public_attributes (void)
26249 int fp16_optional
= 0;
26250 arm_feature_set arm_arch
= ARM_ARCH_NONE
;
26251 arm_feature_set flags
;
26252 arm_feature_set tmp
;
26253 arm_feature_set arm_arch_v8m_base
= ARM_ARCH_V8M_BASE
;
26254 const cpu_arch_ver_table
*p
;
26256 /* Choose the architecture based on the capabilities of the requested cpu
26257 (if any) and/or the instructions actually used. */
26258 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
26259 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
26260 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
26262 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
26263 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
26265 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
26266 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
26268 selected_cpu
= flags
;
26270 /* Allow the user to override the reported architecture. */
26273 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
26274 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
26277 /* We need to make sure that the attributes do not identify us as v6S-M
26278 when the only v6S-M feature in use is the Operating System Extensions. */
26279 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_os
))
26280 if (!ARM_CPU_HAS_FEATURE (flags
, arm_arch_v6m_only
))
26281 ARM_CLEAR_FEATURE (flags
, flags
, arm_ext_os
);
26285 for (p
= cpu_arch_ver
; p
->val
; p
++)
26287 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
26290 arm_arch
= p
->flags
;
26291 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
26295 /* The table lookup above finds the last architecture to contribute
26296 a new feature. Unfortunately, Tag13 is a subset of the union of
26297 v6T2 and v7-M, so it is never seen as contributing a new feature.
26298 We can not search for the last entry which is entirely used,
26299 because if no CPU is specified we build up only those flags
26300 actually used. Perhaps we should separate out the specified
26301 and implicit cases. Avoid taking this path for -march=all by
26302 checking for contradictory v7-A / v7-M features. */
26303 if (arch
== TAG_CPU_ARCH_V7
26304 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
26305 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
26306 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
26308 arch
= TAG_CPU_ARCH_V7E_M
;
26309 arm_arch
= (arm_feature_set
) ARM_ARCH_V7EM
;
26312 ARM_CLEAR_FEATURE (tmp
, flags
, arm_arch_v8m_base
);
26313 if (arch
== TAG_CPU_ARCH_V8M_BASE
&& ARM_CPU_HAS_FEATURE (tmp
, arm_arch_any
))
26315 arch
= TAG_CPU_ARCH_V8M_MAIN
;
26316 arm_arch
= (arm_feature_set
) ARM_ARCH_V8M_MAIN
;
26319 /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
26320 coming from ARMv8-A. However, since ARMv8-A has more instructions than
26321 ARMv8-M, -march=all must be detected as ARMv8-A. */
26322 if (arch
== TAG_CPU_ARCH_V8M_MAIN
26323 && ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
26325 arch
= TAG_CPU_ARCH_V8
;
26326 arm_arch
= (arm_feature_set
) ARM_ARCH_V8A
;
26329 /* Tag_CPU_name. */
26330 if (selected_cpu_name
[0])
26334 q
= selected_cpu_name
;
26335 if (strncmp (q
, "armv", 4) == 0)
26340 for (i
= 0; q
[i
]; i
++)
26341 q
[i
] = TOUPPER (q
[i
]);
26343 aeabi_set_attribute_string (Tag_CPU_name
, q
);
26346 /* Tag_CPU_arch. */
26347 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
26349 /* Tag_CPU_arch_profile. */
26350 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
26351 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26352 || (ARM_CPU_HAS_FEATURE (flags
, arm_ext_atomics
)
26353 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
)))
26355 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
26357 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
26362 if (profile
!= '\0')
26363 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
26365 /* Tag_DSP_extension. */
26366 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_dsp
))
26368 arm_feature_set ext
;
26370 /* DSP instructions not in architecture. */
26371 ARM_CLEAR_FEATURE (ext
, flags
, arm_arch
);
26372 if (ARM_CPU_HAS_FEATURE (ext
, arm_ext_dsp
))
26373 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
26376 /* Tag_ARM_ISA_use. */
26377 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
26379 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
26381 /* Tag_THUMB_ISA_use. */
26382 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
26387 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26388 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
26390 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
26394 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
26397 /* Tag_VFP_arch. */
26398 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
26399 aeabi_set_attribute_int (Tag_VFP_arch
,
26400 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
26402 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
26403 aeabi_set_attribute_int (Tag_VFP_arch
,
26404 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
26406 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
26409 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
26411 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
26413 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
26416 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
26417 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
26418 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
26419 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
26420 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
26422 /* Tag_ABI_HardFP_use. */
26423 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
26424 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
26425 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
26427 /* Tag_WMMX_arch. */
26428 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
26429 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
26430 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
26431 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
26433 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
26434 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
26435 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
26436 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
26437 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
26438 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
26440 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
26442 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
26446 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
26451 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
26452 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
26453 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
26457 We set Tag_DIV_use to two when integer divide instructions have been used
26458 in ARM state, or when Thumb integer divide instructions have been used,
26459 but we have no architecture profile set, nor have we any ARM instructions.
26461 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
26462 by the base architecture.
26464 For new architectures we will have to check these tests. */
26465 gas_assert (arch
<= TAG_CPU_ARCH_V8
26466 || (arch
>= TAG_CPU_ARCH_V8M_BASE
26467 && arch
<= TAG_CPU_ARCH_V8M_MAIN
));
26468 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26469 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
26470 aeabi_set_attribute_int (Tag_DIV_use
, 0);
26471 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
26472 || (profile
== '\0'
26473 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
26474 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
26475 aeabi_set_attribute_int (Tag_DIV_use
, 2);
26477 /* Tag_MP_extension_use. */
26478 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
26479 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
26481 /* Tag Virtualization_use. */
26482 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
26484 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
26487 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
26490 /* Add the default contents for the .ARM.attributes section. */
26494 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
26497 aeabi_set_public_attributes ();
26499 #endif /* OBJ_ELF */
26502 /* Parse a .cpu directive. */
26505 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
26507 const struct arm_cpu_option_table
*opt
;
26511 name
= input_line_pointer
;
26512 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26513 input_line_pointer
++;
26514 saved_char
= *input_line_pointer
;
26515 *input_line_pointer
= 0;
26517 /* Skip the first "all" entry. */
26518 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
26519 if (streq (opt
->name
, name
))
26521 mcpu_cpu_opt
= &opt
->value
;
26522 selected_cpu
= opt
->value
;
26523 if (opt
->canonical_name
)
26524 strcpy (selected_cpu_name
, opt
->canonical_name
);
26528 for (i
= 0; opt
->name
[i
]; i
++)
26529 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
26531 selected_cpu_name
[i
] = 0;
26533 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26534 *input_line_pointer
= saved_char
;
26535 demand_empty_rest_of_line ();
26538 as_bad (_("unknown cpu `%s'"), name
);
26539 *input_line_pointer
= saved_char
;
26540 ignore_rest_of_line ();
26544 /* Parse a .arch directive. */
26547 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
26549 const struct arm_arch_option_table
*opt
;
26553 name
= input_line_pointer
;
26554 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26555 input_line_pointer
++;
26556 saved_char
= *input_line_pointer
;
26557 *input_line_pointer
= 0;
26559 /* Skip the first "all" entry. */
26560 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26561 if (streq (opt
->name
, name
))
26563 mcpu_cpu_opt
= &opt
->value
;
26564 selected_cpu
= opt
->value
;
26565 strcpy (selected_cpu_name
, opt
->name
);
26566 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26567 *input_line_pointer
= saved_char
;
26568 demand_empty_rest_of_line ();
26572 as_bad (_("unknown architecture `%s'\n"), name
);
26573 *input_line_pointer
= saved_char
;
26574 ignore_rest_of_line ();
26578 /* Parse a .object_arch directive. */
26581 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
26583 const struct arm_arch_option_table
*opt
;
26587 name
= input_line_pointer
;
26588 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26589 input_line_pointer
++;
26590 saved_char
= *input_line_pointer
;
26591 *input_line_pointer
= 0;
26593 /* Skip the first "all" entry. */
26594 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26595 if (streq (opt
->name
, name
))
26597 object_arch
= &opt
->value
;
26598 *input_line_pointer
= saved_char
;
26599 demand_empty_rest_of_line ();
26603 as_bad (_("unknown architecture `%s'\n"), name
);
26604 *input_line_pointer
= saved_char
;
26605 ignore_rest_of_line ();
26608 /* Parse a .arch_extension directive. */
26611 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
26613 const struct arm_option_extension_value_table
*opt
;
26614 const arm_feature_set arm_any
= ARM_ANY
;
26617 int adding_value
= 1;
26619 name
= input_line_pointer
;
26620 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26621 input_line_pointer
++;
26622 saved_char
= *input_line_pointer
;
26623 *input_line_pointer
= 0;
26625 if (strlen (name
) >= 2
26626 && strncmp (name
, "no", 2) == 0)
26632 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26633 if (streq (opt
->name
, name
))
26635 int i
, nb_allowed_archs
=
26636 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
26637 for (i
= 0; i
< nb_allowed_archs
; i
++)
26640 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
26642 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *mcpu_cpu_opt
))
26646 if (i
== nb_allowed_archs
)
26648 as_bad (_("architectural extension `%s' is not allowed for the "
26649 "current base architecture"), name
);
26654 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_cpu
,
26657 ARM_CLEAR_FEATURE (selected_cpu
, selected_cpu
, opt
->clear_value
);
26659 mcpu_cpu_opt
= &selected_cpu
;
26660 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26661 *input_line_pointer
= saved_char
;
26662 demand_empty_rest_of_line ();
26666 if (opt
->name
== NULL
)
26667 as_bad (_("unknown architecture extension `%s'\n"), name
);
26669 *input_line_pointer
= saved_char
;
26670 ignore_rest_of_line ();
26673 /* Parse a .fpu directive. */
26676 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
26678 const struct arm_option_fpu_value_table
*opt
;
26682 name
= input_line_pointer
;
26683 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26684 input_line_pointer
++;
26685 saved_char
= *input_line_pointer
;
26686 *input_line_pointer
= 0;
26688 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
26689 if (streq (opt
->name
, name
))
26691 mfpu_opt
= &opt
->value
;
26692 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26693 *input_line_pointer
= saved_char
;
26694 demand_empty_rest_of_line ();
26698 as_bad (_("unknown floating point format `%s'\n"), name
);
26699 *input_line_pointer
= saved_char
;
26700 ignore_rest_of_line ();
26703 /* Copy symbol information. */
26706 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
26708 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
26712 /* Given a symbolic attribute NAME, return the proper integer value.
26713 Returns -1 if the attribute is not known. */
26716 arm_convert_symbolic_attribute (const char *name
)
26718 static const struct
26723 attribute_table
[] =
26725 /* When you modify this table you should
26726 also modify the list in doc/c-arm.texi. */
26727 #define T(tag) {#tag, tag}
26728 T (Tag_CPU_raw_name
),
26731 T (Tag_CPU_arch_profile
),
26732 T (Tag_ARM_ISA_use
),
26733 T (Tag_THUMB_ISA_use
),
26737 T (Tag_Advanced_SIMD_arch
),
26738 T (Tag_PCS_config
),
26739 T (Tag_ABI_PCS_R9_use
),
26740 T (Tag_ABI_PCS_RW_data
),
26741 T (Tag_ABI_PCS_RO_data
),
26742 T (Tag_ABI_PCS_GOT_use
),
26743 T (Tag_ABI_PCS_wchar_t
),
26744 T (Tag_ABI_FP_rounding
),
26745 T (Tag_ABI_FP_denormal
),
26746 T (Tag_ABI_FP_exceptions
),
26747 T (Tag_ABI_FP_user_exceptions
),
26748 T (Tag_ABI_FP_number_model
),
26749 T (Tag_ABI_align_needed
),
26750 T (Tag_ABI_align8_needed
),
26751 T (Tag_ABI_align_preserved
),
26752 T (Tag_ABI_align8_preserved
),
26753 T (Tag_ABI_enum_size
),
26754 T (Tag_ABI_HardFP_use
),
26755 T (Tag_ABI_VFP_args
),
26756 T (Tag_ABI_WMMX_args
),
26757 T (Tag_ABI_optimization_goals
),
26758 T (Tag_ABI_FP_optimization_goals
),
26759 T (Tag_compatibility
),
26760 T (Tag_CPU_unaligned_access
),
26761 T (Tag_FP_HP_extension
),
26762 T (Tag_VFP_HP_extension
),
26763 T (Tag_ABI_FP_16bit_format
),
26764 T (Tag_MPextension_use
),
26766 T (Tag_nodefaults
),
26767 T (Tag_also_compatible_with
),
26768 T (Tag_conformance
),
26770 T (Tag_Virtualization_use
),
26771 T (Tag_DSP_extension
),
26772 /* We deliberately do not include Tag_MPextension_use_legacy. */
26780 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
26781 if (streq (name
, attribute_table
[i
].name
))
26782 return attribute_table
[i
].tag
;
26788 /* Apply sym value for relocations only in the case that they are for
26789 local symbols in the same segment as the fixup and you have the
26790 respective architectural feature for blx and simple switches. */
26792 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
26795 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
26796 /* PR 17444: If the local symbol is in a different section then a reloc
26797 will always be generated for it, so applying the symbol value now
26798 will result in a double offset being stored in the relocation. */
26799 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
26800 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
26802 switch (fixP
->fx_r_type
)
26804 case BFD_RELOC_ARM_PCREL_BLX
:
26805 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26806 if (ARM_IS_FUNC (fixP
->fx_addsy
))
26810 case BFD_RELOC_ARM_PCREL_CALL
:
26811 case BFD_RELOC_THUMB_PCREL_BLX
:
26812 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
26823 #endif /* OBJ_ELF */