1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS
,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result
;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
/* True iff the NUL-terminated strings A and B compare equal.
   BUGFIX: arguments are now parenthesized, so expressions such as
   conditional operators expand safely inside the macro.  */
#define streq(a, b) (strcmp ((a), (b)) == 0)
126 static arm_feature_set cpu_variant
;
127 static arm_feature_set arm_arch_used
;
128 static arm_feature_set thumb_arch_used
;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26
= FALSE
;
132 static int atpcs
= FALSE
;
133 static int support_interwork
= FALSE
;
134 static int uses_apcs_float
= FALSE
;
135 static int pic_code
= FALSE
;
136 static int fix_v4bx
= FALSE
;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated
= TRUE
;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
154 static const arm_feature_set
*object_arch
= NULL
;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
158 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
159 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
160 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
161 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
162 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
163 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
165 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
167 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
170 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
173 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
174 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
175 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
176 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
177 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
178 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
179 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
180 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
181 static const arm_feature_set arm_ext_v4t_5
=
182 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
183 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
184 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
185 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
186 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
187 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
188 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
189 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
190 static const arm_feature_set arm_ext_v6m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
);
191 static const arm_feature_set arm_ext_v6_notm
=
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
193 static const arm_feature_set arm_ext_v6_dsp
=
194 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
195 static const arm_feature_set arm_ext_barrier
=
196 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
197 static const arm_feature_set arm_ext_msr
=
198 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
199 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
200 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
201 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
202 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
204 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
206 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
207 static const arm_feature_set arm_ext_m
=
208 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_OS
| ARM_EXT_V7M
,
209 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
210 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
211 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
212 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
213 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
214 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
215 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
216 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
217 static const arm_feature_set arm_ext_v8m_main
=
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
219 /* Instructions in ARMv8-M only found in M profile architectures. */
220 static const arm_feature_set arm_ext_v8m_m_only
=
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
222 static const arm_feature_set arm_ext_v6t2_v8m
=
223 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
224 /* Instructions shared between ARMv8-A and ARMv8-M. */
225 static const arm_feature_set arm_ext_atomics
=
226 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
228 /* DSP instructions Tag_DSP_extension refers to. */
229 static const arm_feature_set arm_ext_dsp
=
230 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
232 static const arm_feature_set arm_ext_ras
=
233 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
234 /* FP16 instructions. */
235 static const arm_feature_set arm_ext_fp16
=
236 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
238 static const arm_feature_set arm_arch_any
= ARM_ANY
;
239 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
240 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
241 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
243 static const arm_feature_set arm_arch_v6m_only
= ARM_ARCH_V6M_ONLY
;
246 static const arm_feature_set arm_cext_iwmmxt2
=
247 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
248 static const arm_feature_set arm_cext_iwmmxt
=
249 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
250 static const arm_feature_set arm_cext_xscale
=
251 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
252 static const arm_feature_set arm_cext_maverick
=
253 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
254 static const arm_feature_set fpu_fpa_ext_v1
=
255 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
256 static const arm_feature_set fpu_fpa_ext_v2
=
257 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
258 static const arm_feature_set fpu_vfp_ext_v1xd
=
259 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
260 static const arm_feature_set fpu_vfp_ext_v1
=
261 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
262 static const arm_feature_set fpu_vfp_ext_v2
=
263 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
264 static const arm_feature_set fpu_vfp_ext_v3xd
=
265 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
266 static const arm_feature_set fpu_vfp_ext_v3
=
267 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
268 static const arm_feature_set fpu_vfp_ext_d32
=
269 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
270 static const arm_feature_set fpu_neon_ext_v1
=
271 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
272 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
273 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
275 static const arm_feature_set fpu_vfp_fp16
=
276 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
277 static const arm_feature_set fpu_neon_ext_fma
=
278 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
280 static const arm_feature_set fpu_vfp_ext_fma
=
281 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
282 static const arm_feature_set fpu_vfp_ext_armv8
=
283 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
284 static const arm_feature_set fpu_vfp_ext_armv8xd
=
285 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
286 static const arm_feature_set fpu_neon_ext_armv8
=
287 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
288 static const arm_feature_set fpu_crypto_ext_armv8
=
289 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
290 static const arm_feature_set crc_ext_armv8
=
291 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
292 static const arm_feature_set fpu_neon_ext_v8_1
=
293 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
295 static int mfloat_abi_opt
= -1;
296 /* Record user cpu selection for object attributes. */
297 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
298 /* Must be long enough to hold any of the names in arm_cpus. */
299 static char selected_cpu_name
[20];
301 extern FLONUM_TYPE generic_floating_point_number
;
303 /* Return if no cpu was selected on command-line. */
305 no_cpu_selected (void)
307 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
312 static int meabi_flags
= EABI_DEFAULT
;
314 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
317 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
322 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
327 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
328 symbolS
* GOT_symbol
;
/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;

/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)
/* Specifies the intrinsic IT insn behavior mode.  */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER  = 0x00,
  IMPLICIT_IT_MODE_ARM    = 0x01,
  IMPLICIT_IT_MODE_THUMB  = 0x02,
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
351 /* If unified_syntax is true, we are processing the new unified
352 ARM/Thumb syntax. Important differences from the old ARM mode:
354 - Immediate operands do not require a # prefix.
355 - Conditional affixes always appear at the end of the
356 instruction. (For backward compatibility, those instructions
357 that formerly had them in the middle, continue to accept them
359 - The IT instruction may appear, and if it does is validated
360 against subsequent conditional affixes. It does not generate
363 Important differences from the old Thumb mode:
365 - Immediate operands do not require a # prefix.
366 - Most of the V6T2 instructions are only available in unified mode.
367 - The .N and .W suffixes are recognized and honored (it is an error
368 if they cannot be honored).
369 - All instructions set the flags if and only if they have an 's' affix.
370 - Conditional affixes may be used. They are validated against
371 preceding IT instructions. Unlike ARM mode, you cannot use a
372 conditional affix except in the scope of an IT instruction. */
374 static bfd_boolean unified_syntax
= FALSE
;
/* An immediate operand can start with #, and ld*, st*, pld operands
   can contain [ and ].  We need to tell APP not to elide whitespace
   before a [, which can appear as the first operand for pld.
   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
const char arm_symbol_chars[] = "#[]{}";
395 enum neon_el_type type
;
399 #define NEON_MAX_TYPE_ELS 4
403 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
407 enum it_instruction_type
412 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
413 if inside, should be the last one. */
414 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
415 i.e. BKPT and NOP. */
416 IT_INSN
/* The IT insn has been parsed. */
419 /* The maximum number of operands we need. */
420 #define ARM_IT_MAX_OPERANDS 6
425 unsigned long instruction
;
429 /* "uncond_value" is set to the value in place of the conditional field in
430 unconditional versions of the instruction, or -1 if nothing is
433 struct neon_type vectype
;
434 /* This does not indicate an actual NEON instruction, only that
435 the mnemonic accepts neon-style type suffixes. */
437 /* Set to the opcode if the instruction needs relaxation.
438 Zero if the instruction is not relaxed. */
442 bfd_reloc_code_real_type type
;
447 enum it_instruction_type it_insn_type
;
453 struct neon_type_el vectype
;
454 unsigned present
: 1; /* Operand present. */
455 unsigned isreg
: 1; /* Operand was a register. */
456 unsigned immisreg
: 1; /* .imm field is a second register. */
457 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
458 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
459 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
460 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
461 instructions. This allows us to disambiguate ARM <-> vector insns. */
462 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
463 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
464 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
465 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
466 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
467 unsigned writeback
: 1; /* Operand has trailing ! */
468 unsigned preind
: 1; /* Preindexed address. */
469 unsigned postind
: 1; /* Postindexed address. */
470 unsigned negative
: 1; /* Index register was negated. */
471 unsigned shifted
: 1; /* Shift applied to operation. */
472 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
473 } operands
[ARM_IT_MAX_OPERANDS
];
476 static struct arm_it inst
;
478 #define NUM_FLOAT_VALS 8
480 const char * fp_const
[] =
482 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
485 /* Number of littlenums required to hold an extended precision number. */
486 #define MAX_LITTLENUMS 6
488 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
/* Coprocessor transfer and load/store control bits.  */
#define CP_T_X            0x00008000
#define CP_T_Y            0x00400000

#define CONDS_BIT         0x00100000
#define LOAD_BIT          0x00100000

#define DOUBLE_LOAD_FLAG  0x00000001
508 const char * template_name
;
/* The "always" (AL) condition code value.  */
#define COND_ALWAYS 0xE
516 const char * template_name
;
520 struct asm_barrier_opt
522 const char * template_name
;
524 const arm_feature_set arch
;
/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c (1 << 16)
#define PSR_x (1 << 17)
#define PSR_s (1 << 18)
#define PSR_f (1 << 19)
539 bfd_reloc_code_real_type reloc
;
544 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
545 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
550 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
553 /* Bits for DEFINED field in neon_typed_alias. */
554 #define NTA_HASTYPE 1
555 #define NTA_HASINDEX 2
557 struct neon_typed_alias
559 unsigned char defined
;
561 struct neon_type_el eltype
;
564 /* ARM register categories. This includes coprocessor numbers and various
565 architecture extensions' registers. */
592 /* Structure for a hash table entry for a register.
593 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
594 information which states whether a vector type or index is specified (for a
595 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
601 unsigned char builtin
;
602 struct neon_typed_alias
* neon
;
605 /* Diagnostics used when we don't get a register of the expected type. */
606 const char * const reg_expected_msgs
[] =
608 N_("ARM register expected"),
609 N_("bad or missing co-processor number"),
610 N_("co-processor register expected"),
611 N_("FPA register expected"),
612 N_("VFP single precision register expected"),
613 N_("VFP/Neon double precision register expected"),
614 N_("Neon quad precision register expected"),
615 N_("VFP single or double precision register expected"),
616 N_("Neon double or quad precision register expected"),
617 N_("VFP single, double or Neon quad precision register expected"),
618 N_("VFP system register expected"),
619 N_("Maverick MVF register expected"),
620 N_("Maverick MVD register expected"),
621 N_("Maverick MVFX register expected"),
622 N_("Maverick MVDX register expected"),
623 N_("Maverick MVAX register expected"),
624 N_("Maverick DSPSC register expected"),
625 N_("iWMMXt data register expected"),
626 N_("iWMMXt control register expected"),
627 N_("iWMMXt scalar register expected"),
628 N_("XScale accumulator register expected"),
631 /* Some well known registers that we refer to directly elsewhere. */
637 /* ARM instructions take 4bytes in the object file, Thumb instructions
643 /* Basic string to match. */
644 const char * template_name
;
646 /* Parameters to instruction. */
647 unsigned int operands
[8];
649 /* Conditional tag - see opcode_lookup. */
650 unsigned int tag
: 4;
652 /* Basic instruction code. */
653 unsigned int avalue
: 28;
655 /* Thumb-format instruction code. */
658 /* Which architecture variant provides this instruction. */
659 const arm_feature_set
* avariant
;
660 const arm_feature_set
* tvariant
;
662 /* Function to call to encode instruction in ARM format. */
663 void (* aencode
) (void);
665 /* Function to call to encode instruction in Thumb format. */
666 void (* tencode
) (void);
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020
#define VLDR_VMOV_SAME	0x0040f000

#define T2_SUBS_PC_LR	0xf3de8f00

#define DATA_OP_SHIFT	21

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21

#define A_COND_MASK         0xf0000000
#define A_PUSH_POP_OP_MASK  0x0fff0000

/* Opcodes for pushing/poping registers to/from the stack.  */
#define A1_OPCODE_PUSH    0x092d0000
#define A2_OPCODE_PUSH    0x052d0004
#define A2_OPCODE_POP     0x049d0004

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
/* Diagnostic messages used when rejecting operands or instructions.
   Each expands to a translated string via gettext's _() macro.
   BUGFIX: removed the stray trailing semicolon from BAD_ADDR_MODE; a
   semicolon inside an expression-like macro breaks any use of the
   macro in a non-statement context (e.g. as a function argument).  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
/* Hash tables used while assembling; populated elsewhere in this file
   (mnemonics, condition codes, shifts, PSR names, registers, relocation
   names and barrier options, going by the identifiers).  */
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;
820 /* Stuff needed to resolve the label ambiguity
829 symbolS
* last_label_seen
;
830 static int label_is_thumb_function_name
= FALSE
;
832 /* Literal pool structure. Held on a per-section
833 and per-sub-section basis. */
835 #define MAX_LITERAL_POOL_SIZE 1024
836 typedef struct literal_pool
838 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
839 unsigned int next_free_entry
;
845 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
847 struct literal_pool
* next
;
848 unsigned int alignment
;
851 /* Pointer to a linked list of literal pools. */
852 literal_pool
* list_of_pools
= NULL
;
854 typedef enum asmfunc_states
857 WAITING_ASMFUNC_NAME
,
861 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
864 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
866 static struct current_it now_it
;
870 now_it_compatible (int cond
)
872 return (cond
& ~1) == (now_it
.cc
& ~1);
876 conditional_insn (void)
878 return inst
.cond
!= COND_ALWAYS
;
/* Forward declarations for the IT-state machinery used below.  */
static int in_it_block (void);
static int handle_it_state (void);
static void force_automatic_it_block_close (void);
static void it_fsm_post_encode (void);
889 #define set_it_insn_type(type) \
892 inst.it_insn_type = type; \
893 if (handle_it_state () == FAIL) \
898 #define set_it_insn_type_nonvoid(type, failret) \
901 inst.it_insn_type = type; \
902 if (handle_it_state () == FAIL) \
907 #define set_it_insn_type_last() \
910 if (inst.cond == COND_ALWAYS) \
911 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
913 set_it_insn_type (INSIDE_IT_LAST_INSN); \
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
953 skip_past_char (char ** str
, char c
)
955 /* PR gas/14987: Allow for whitespace before the expected character. */
956 skip_whitespace (*str
);
/* Skip past a comma, tolerating preceding whitespace (see skip_past_char).  */
#define skip_past_comma(str) skip_past_char (str, ',')
969 /* Arithmetic expressions (possibly involving symbols). */
971 /* Return TRUE if anything in the expression is a bignum. */
974 walk_no_bignums (symbolS
* sp
)
976 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
979 if (symbol_get_value_expression (sp
)->X_add_symbol
)
981 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
982 || (symbol_get_value_expression (sp
)->X_op_symbol
983 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
/* Nonzero while my_get_expression is executing (consulted by md_operand).  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX  0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack.  Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1000 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1005 /* In unified syntax, all prefixes are optional. */
1007 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1010 switch (prefix_mode
)
1012 case GE_NO_PREFIX
: break;
1014 if (!is_immediate_prefix (**str
))
1016 inst
.error
= _("immediate expression requires a # prefix");
1022 case GE_OPT_PREFIX_BIG
:
1023 if (is_immediate_prefix (**str
))
1029 memset (ep
, 0, sizeof (expressionS
));
1031 save_in
= input_line_pointer
;
1032 input_line_pointer
= *str
;
1033 in_my_get_expression
= 1;
1034 seg
= expression (ep
);
1035 in_my_get_expression
= 0;
1037 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1039 /* We found a bad or missing expression in md_operand(). */
1040 *str
= input_line_pointer
;
1041 input_line_pointer
= save_in
;
1042 if (inst
.error
== NULL
)
1043 inst
.error
= (ep
->X_op
== O_absent
1044 ? _("missing expression") :_("bad expression"));
1049 if (seg
!= absolute_section
1050 && seg
!= text_section
1051 && seg
!= data_section
1052 && seg
!= bss_section
1053 && seg
!= undefined_section
)
1055 inst
.error
= _("bad segment");
1056 *str
= input_line_pointer
;
1057 input_line_pointer
= save_in
;
1064 /* Get rid of any bignums now, so that we don't generate an error for which
1065 we can't establish a line number later on. Big numbers are never valid
1066 in instructions, which is where this routine is always called. */
1067 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1068 && (ep
->X_op
== O_big
1069 || (ep
->X_add_symbol
1070 && (walk_no_bignums (ep
->X_add_symbol
)
1072 && walk_no_bignums (ep
->X_op_symbol
))))))
1074 inst
.error
= _("invalid constant");
1075 *str
= input_line_pointer
;
1076 input_line_pointer
= save_in
;
1080 *str
= input_line_pointer
;
1081 input_line_pointer
= save_in
;
1085 /* Turn a string in input_line_pointer into a floating point constant
1086 of type TYPE, and store the appropriate bytes in *LITP. The number
1087 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1088 returned, or NULL on OK.
1090 Note that fp constants aren't represent in the normal way on the ARM.
1091 In big endian mode, things are as expected. However, in little endian
1092 mode fp constants are big-endian word-wise, and little-endian byte-wise
1093 within the words. For example, (double) 1.1 in big endian mode is
1094 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1095 the byte sequence 99 99 f1 3f 9a 99 99 99.
1097 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1100 md_atof (int type
, char * litP
, int * sizeP
)
1103 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1135 return _("Unrecognized or unsupported floating point constant");
1138 t
= atof_ieee (input_line_pointer
, type
, words
);
1140 input_line_pointer
= t
;
1141 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1143 if (target_big_endian
)
1145 for (i
= 0; i
< prec
; i
++)
1147 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1148 litP
+= sizeof (LITTLENUM_TYPE
);
1153 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1154 for (i
= prec
- 1; i
>= 0; i
--)
1156 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1157 litP
+= sizeof (LITTLENUM_TYPE
);
1160 /* For a 4 byte float the order of elements in `words' is 1 0.
1161 For an 8 byte float the order is 1 0 3 2. */
1162 for (i
= 0; i
< prec
; i
+= 2)
1164 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1165 sizeof (LITTLENUM_TYPE
));
1166 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1167 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1168 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1175 /* We handle all bad expressions here, so that we can report the faulty
1176 instruction in the error message. */
1178 md_operand (expressionS
* exp
)
1180 if (in_my_get_expression
)
1181 exp
->X_op
= O_illegal
;
1184 /* Immediate values. */
1186 /* Generic immediate-value read function for use in directives.
1187 Accepts anything that 'expression' can fold to a constant.
1188 *val receives the number. */
1191 immediate_for_directive (int *val
)
1194 exp
.X_op
= O_illegal
;
1196 if (is_immediate_prefix (*input_line_pointer
))
1198 input_line_pointer
++;
1202 if (exp
.X_op
!= O_constant
)
1204 as_bad (_("expected #constant"));
1205 ignore_rest_of_line ();
1208 *val
= exp
.X_add_number
;
1213 /* Register parsing. */
1215 /* Generic register parser. CCP points to what should be the
1216 beginning of a register name. If it is indeed a valid register
1217 name, advance CCP over it and return the reg_entry structure;
1218 otherwise return NULL. Does not issue diagnostics. */
1220 static struct reg_entry
*
1221 arm_reg_parse_multi (char **ccp
)
1225 struct reg_entry
*reg
;
1227 skip_whitespace (start
);
1229 #ifdef REGISTER_PREFIX
1230 if (*start
!= REGISTER_PREFIX
)
1234 #ifdef OPTIONAL_REGISTER_PREFIX
1235 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1240 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1245 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1247 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1257 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1258 enum arm_reg_type type
)
1260 /* Alternative syntaxes are accepted for a few register classes. */
1267 /* Generic coprocessor register names are allowed for these. */
1268 if (reg
&& reg
->type
== REG_TYPE_CN
)
1273 /* For backward compatibility, a bare number is valid here. */
1275 unsigned long processor
= strtoul (start
, ccp
, 10);
1276 if (*ccp
!= start
&& processor
<= 15)
1280 case REG_TYPE_MMXWC
:
1281 /* WC includes WCG. ??? I'm not sure this is true for all
1282 instructions that take WC registers. */
1283 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1294 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1295 return value is the register number or FAIL. */
1298 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1301 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1304 /* Do not allow a scalar (reg+index) to parse as a register. */
1305 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1308 if (reg
&& reg
->type
== type
)
1311 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1318 /* Parse a Neon type specifier. *STR should point at the leading '.'
1319 character. Does no verification at this stage that the type fits the opcode
1326 Can all be legally parsed by this function.
1328 Fills in neon_type struct pointer with parsed information, and updates STR
1329 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1330 type, FAIL if not. */
1333 parse_neon_type (struct neon_type
*type
, char **str
)
1340 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1342 enum neon_el_type thistype
= NT_untyped
;
1343 unsigned thissize
= -1u;
1350 /* Just a size without an explicit type. */
1354 switch (TOLOWER (*ptr
))
1356 case 'i': thistype
= NT_integer
; break;
1357 case 'f': thistype
= NT_float
; break;
1358 case 'p': thistype
= NT_poly
; break;
1359 case 's': thistype
= NT_signed
; break;
1360 case 'u': thistype
= NT_unsigned
; break;
1362 thistype
= NT_float
;
1367 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1373 /* .f is an abbreviation for .f32. */
1374 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1379 thissize
= strtoul (ptr
, &ptr
, 10);
1381 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1384 as_bad (_("bad size %d in type specifier"), thissize
);
1392 type
->el
[type
->elems
].type
= thistype
;
1393 type
->el
[type
->elems
].size
= thissize
;
1398 /* Empty/missing type is not a successful parse. */
1399 if (type
->elems
== 0)
1407 /* Errors may be set multiple times during parsing or bit encoding
1408 (particularly in the Neon bits), but usually the earliest error which is set
1409 will be the most meaningful. Avoid overwriting it with later (cascading)
1410 errors by calling this function. */
1413 first_error (const char *err
)
1419 /* Parse a single type, e.g. ".s32", leading period included. */
1421 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1424 struct neon_type optype
;
1428 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1430 if (optype
.elems
== 1)
1431 *vectype
= optype
.el
[0];
1434 first_error (_("only one type should be specified for operand"));
1440 first_error (_("vector type expected"));
1452 /* Special meanings for indices (which have a range of 0-7), which will fit into
1455 #define NEON_ALL_LANES 15
1456 #define NEON_INTERLEAVE_LANES 14
1458 /* Parse either a register or a scalar, with an optional type. Return the
1459 register number, and optionally fill in the actual type of the register
1460 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1461 type/index information in *TYPEINFO. */
1464 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1465 enum arm_reg_type
*rtype
,
1466 struct neon_typed_alias
*typeinfo
)
1469 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1470 struct neon_typed_alias atype
;
1471 struct neon_type_el parsetype
;
1475 atype
.eltype
.type
= NT_invtype
;
1476 atype
.eltype
.size
= -1;
1478 /* Try alternate syntax for some types of register. Note these are mutually
1479 exclusive with the Neon syntax extensions. */
1482 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1490 /* Undo polymorphism when a set of register types may be accepted. */
1491 if ((type
== REG_TYPE_NDQ
1492 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1493 || (type
== REG_TYPE_VFSD
1494 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1495 || (type
== REG_TYPE_NSDQ
1496 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1497 || reg
->type
== REG_TYPE_NQ
))
1498 || (type
== REG_TYPE_MMXWC
1499 && (reg
->type
== REG_TYPE_MMXWCG
)))
1500 type
= (enum arm_reg_type
) reg
->type
;
1502 if (type
!= reg
->type
)
1508 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1510 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1512 first_error (_("can't redefine type for operand"));
1515 atype
.defined
|= NTA_HASTYPE
;
1516 atype
.eltype
= parsetype
;
1519 if (skip_past_char (&str
, '[') == SUCCESS
)
1521 if (type
!= REG_TYPE_VFD
)
1523 first_error (_("only D registers may be indexed"));
1527 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1529 first_error (_("can't change index for operand"));
1533 atype
.defined
|= NTA_HASINDEX
;
1535 if (skip_past_char (&str
, ']') == SUCCESS
)
1536 atype
.index
= NEON_ALL_LANES
;
1541 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1543 if (exp
.X_op
!= O_constant
)
1545 first_error (_("constant expression required"));
1549 if (skip_past_char (&str
, ']') == FAIL
)
1552 atype
.index
= exp
.X_add_number
;
1567 /* Like arm_reg_parse, but allow allow the following extra features:
1568 - If RTYPE is non-zero, return the (possibly restricted) type of the
1569 register (e.g. Neon double or quad reg when either has been requested).
1570 - If this is a Neon vector type with additional type information, fill
1571 in the struct pointed to by VECTYPE (if non-NULL).
1572 This function will fault on encountering a scalar. */
1575 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1576 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1578 struct neon_typed_alias atype
;
1580 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1585 /* Do not allow regname(... to parse as a register. */
1589 /* Do not allow a scalar (reg+index) to parse as a register. */
1590 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1592 first_error (_("register operand expected, but got scalar"));
1597 *vectype
= atype
.eltype
;
1604 #define NEON_SCALAR_REG(X) ((X) >> 4)
1605 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1607 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1608 have enough information to be able to do a good job bounds-checking. So, we
1609 just do easy checks here, and do further checks later. */
1612 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1616 struct neon_typed_alias atype
;
1618 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1620 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1623 if (atype
.index
== NEON_ALL_LANES
)
1625 first_error (_("scalar must have an index"));
1628 else if (atype
.index
>= 64 / elsize
)
1630 first_error (_("scalar index out of range"));
1635 *type
= atype
.eltype
;
1639 return reg
* 16 + atype
.index
;
1642 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1645 parse_reg_list (char ** strp
)
1647 char * str
= * strp
;
1651 /* We come back here if we get ranges concatenated by '+' or '|'. */
1654 skip_whitespace (str
);
1668 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1670 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1680 first_error (_("bad range in register list"));
1684 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1686 if (range
& (1 << i
))
1688 (_("Warning: duplicated register (r%d) in register list"),
1696 if (range
& (1 << reg
))
1697 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1699 else if (reg
<= cur_reg
)
1700 as_tsktsk (_("Warning: register range not in ascending order"));
1705 while (skip_past_comma (&str
) != FAIL
1706 || (in_range
= 1, *str
++ == '-'));
1709 if (skip_past_char (&str
, '}') == FAIL
)
1711 first_error (_("missing `}'"));
1719 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1722 if (exp
.X_op
== O_constant
)
1724 if (exp
.X_add_number
1725 != (exp
.X_add_number
& 0x0000ffff))
1727 inst
.error
= _("invalid register mask");
1731 if ((range
& exp
.X_add_number
) != 0)
1733 int regno
= range
& exp
.X_add_number
;
1736 regno
= (1 << regno
) - 1;
1738 (_("Warning: duplicated register (r%d) in register list"),
1742 range
|= exp
.X_add_number
;
1746 if (inst
.reloc
.type
!= 0)
1748 inst
.error
= _("expression too complex");
1752 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1753 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1754 inst
.reloc
.pc_rel
= 0;
1758 if (*str
== '|' || *str
== '+')
1764 while (another_range
);
1770 /* Types of registers in a list. */
1779 /* Parse a VFP register list. If the string is invalid return FAIL.
1780 Otherwise return the number of registers, and set PBASE to the first
1781 register. Parses registers of type ETYPE.
1782 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1783 - Q registers can be used to specify pairs of D registers
1784 - { } can be omitted from around a singleton register list
1785 FIXME: This is not implemented, as it would require backtracking in
1788 This could be done (the meaning isn't really ambiguous), but doesn't
1789 fit in well with the current parsing framework.
1790 - 32 D registers may be used (also true for VFPv3).
1791 FIXME: Types are ignored in these register lists, which is probably a
1795 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1800 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1804 unsigned long mask
= 0;
1807 if (skip_past_char (&str
, '{') == FAIL
)
1809 inst
.error
= _("expecting {");
1816 regtype
= REG_TYPE_VFS
;
1821 regtype
= REG_TYPE_VFD
;
1824 case REGLIST_NEON_D
:
1825 regtype
= REG_TYPE_NDQ
;
1829 if (etype
!= REGLIST_VFP_S
)
1831 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1832 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1836 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1839 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1846 base_reg
= max_regs
;
1850 int setmask
= 1, addregs
= 1;
1852 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1854 if (new_base
== FAIL
)
1856 first_error (_(reg_expected_msgs
[regtype
]));
1860 if (new_base
>= max_regs
)
1862 first_error (_("register out of range in list"));
1866 /* Note: a value of 2 * n is returned for the register Q<n>. */
1867 if (regtype
== REG_TYPE_NQ
)
1873 if (new_base
< base_reg
)
1874 base_reg
= new_base
;
1876 if (mask
& (setmask
<< new_base
))
1878 first_error (_("invalid register list"));
1882 if ((mask
>> new_base
) != 0 && ! warned
)
1884 as_tsktsk (_("register list not in ascending order"));
1888 mask
|= setmask
<< new_base
;
1891 if (*str
== '-') /* We have the start of a range expression */
1897 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1900 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1904 if (high_range
>= max_regs
)
1906 first_error (_("register out of range in list"));
1910 if (regtype
== REG_TYPE_NQ
)
1911 high_range
= high_range
+ 1;
1913 if (high_range
<= new_base
)
1915 inst
.error
= _("register range not in ascending order");
1919 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1921 if (mask
& (setmask
<< new_base
))
1923 inst
.error
= _("invalid register list");
1927 mask
|= setmask
<< new_base
;
1932 while (skip_past_comma (&str
) != FAIL
);
1936 /* Sanity check -- should have raised a parse error above. */
1937 if (count
== 0 || count
> max_regs
)
1942 /* Final test -- the registers must be consecutive. */
1944 for (i
= 0; i
< count
; i
++)
1946 if ((mask
& (1u << i
)) == 0)
1948 inst
.error
= _("non-contiguous register range");
1958 /* True if two alias types are the same. */
1961 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1969 if (a
->defined
!= b
->defined
)
1972 if ((a
->defined
& NTA_HASTYPE
) != 0
1973 && (a
->eltype
.type
!= b
->eltype
.type
1974 || a
->eltype
.size
!= b
->eltype
.size
))
1977 if ((a
->defined
& NTA_HASINDEX
) != 0
1978 && (a
->index
!= b
->index
))
1984 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1985 The base register is put in *PBASE.
1986 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1988 The register stride (minus one) is put in bit 4 of the return value.
1989 Bits [6:5] encode the list length (minus one).
1990 The type of the list elements is put in *ELTYPE, if non-NULL. */
1992 #define NEON_LANE(X) ((X) & 0xf)
1993 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1994 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1997 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1998 struct neon_type_el
*eltype
)
2005 int leading_brace
= 0;
2006 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2007 const char *const incr_error
= _("register stride must be 1 or 2");
2008 const char *const type_error
= _("mismatched element/structure types in list");
2009 struct neon_typed_alias firsttype
;
2010 firsttype
.defined
= 0;
2011 firsttype
.eltype
.type
= NT_invtype
;
2012 firsttype
.eltype
.size
= -1;
2013 firsttype
.index
= -1;
2015 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2020 struct neon_typed_alias atype
;
2021 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2025 first_error (_(reg_expected_msgs
[rtype
]));
2032 if (rtype
== REG_TYPE_NQ
)
2038 else if (reg_incr
== -1)
2040 reg_incr
= getreg
- base_reg
;
2041 if (reg_incr
< 1 || reg_incr
> 2)
2043 first_error (_(incr_error
));
2047 else if (getreg
!= base_reg
+ reg_incr
* count
)
2049 first_error (_(incr_error
));
2053 if (! neon_alias_types_same (&atype
, &firsttype
))
2055 first_error (_(type_error
));
2059 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2063 struct neon_typed_alias htype
;
2064 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2066 lane
= NEON_INTERLEAVE_LANES
;
2067 else if (lane
!= NEON_INTERLEAVE_LANES
)
2069 first_error (_(type_error
));
2074 else if (reg_incr
!= 1)
2076 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2080 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2083 first_error (_(reg_expected_msgs
[rtype
]));
2086 if (! neon_alias_types_same (&htype
, &firsttype
))
2088 first_error (_(type_error
));
2091 count
+= hireg
+ dregs
- getreg
;
2095 /* If we're using Q registers, we can't use [] or [n] syntax. */
2096 if (rtype
== REG_TYPE_NQ
)
2102 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2106 else if (lane
!= atype
.index
)
2108 first_error (_(type_error
));
2112 else if (lane
== -1)
2113 lane
= NEON_INTERLEAVE_LANES
;
2114 else if (lane
!= NEON_INTERLEAVE_LANES
)
2116 first_error (_(type_error
));
2121 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2123 /* No lane set by [x]. We must be interleaving structures. */
2125 lane
= NEON_INTERLEAVE_LANES
;
2128 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2129 || (count
> 1 && reg_incr
== -1))
2131 first_error (_("error parsing element/structure list"));
2135 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2137 first_error (_("expected }"));
2145 *eltype
= firsttype
.eltype
;
2150 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2153 /* Parse an explicit relocation suffix on an expression. This is
2154 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2155 arm_reloc_hsh contains no entries, so this function can only
2156 succeed if there is no () after the word. Returns -1 on error,
2157 BFD_RELOC_UNUSED if there wasn't any suffix. */
2160 parse_reloc (char **str
)
2162 struct reloc_entry
*r
;
2166 return BFD_RELOC_UNUSED
;
2171 while (*q
&& *q
!= ')' && *q
!= ',')
2176 if ((r
= (struct reloc_entry
*)
2177 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2184 /* Directives: register aliases. */
2186 static struct reg_entry
*
2187 insert_reg_alias (char *str
, unsigned number
, int type
)
2189 struct reg_entry
*new_reg
;
2192 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2194 if (new_reg
->builtin
)
2195 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2197 /* Only warn about a redefinition if it's not defined as the
2199 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2200 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2205 name
= xstrdup (str
);
2206 new_reg
= XNEW (struct reg_entry
);
2208 new_reg
->name
= name
;
2209 new_reg
->number
= number
;
2210 new_reg
->type
= type
;
2211 new_reg
->builtin
= FALSE
;
2212 new_reg
->neon
= NULL
;
2214 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2221 insert_neon_reg_alias (char *str
, int number
, int type
,
2222 struct neon_typed_alias
*atype
)
2224 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2228 first_error (_("attempt to redefine typed alias"));
2234 reg
->neon
= XNEW (struct neon_typed_alias
);
2235 *reg
->neon
= *atype
;
2239 /* Look for the .req directive. This is of the form:
2241 new_register_name .req existing_register_name
2243 If we find one, or if it looks sufficiently like one that we want to
2244 handle any error here, return TRUE. Otherwise return FALSE. */
2247 create_register_alias (char * newname
, char *p
)
2249 struct reg_entry
*old
;
2250 char *oldname
, *nbuf
;
2253 /* The input scrubber ensures that whitespace after the mnemonic is
2254 collapsed to single spaces. */
2256 if (strncmp (oldname
, " .req ", 6) != 0)
2260 if (*oldname
== '\0')
2263 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2266 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2270 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2271 the desired alias name, and p points to its end. If not, then
2272 the desired alias name is in the global original_case_string. */
2273 #ifdef TC_CASE_SENSITIVE
2276 newname
= original_case_string
;
2277 nlen
= strlen (newname
);
2280 nbuf
= xmemdup0 (newname
, nlen
);
2282 /* Create aliases under the new name as stated; an all-lowercase
2283 version of the new name; and an all-uppercase version of the new
2285 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2287 for (p
= nbuf
; *p
; p
++)
2290 if (strncmp (nbuf
, newname
, nlen
))
2292 /* If this attempt to create an additional alias fails, do not bother
2293 trying to create the all-lower case alias. We will fail and issue
2294 a second, duplicate error message. This situation arises when the
2295 programmer does something like:
2298 The second .req creates the "Foo" alias but then fails to create
2299 the artificial FOO alias because it has already been created by the
2301 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2308 for (p
= nbuf
; *p
; p
++)
2311 if (strncmp (nbuf
, newname
, nlen
))
2312 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2319 /* Create a Neon typed/indexed register alias using directives, e.g.:
2324 These typed registers can be used instead of the types specified after the
2325 Neon mnemonic, so long as all operands given have types. Types can also be
2326 specified directly, e.g.:
2327 vadd d0.s32, d1.s32, d2.s32 */
2330 create_neon_reg_alias (char *newname
, char *p
)
2332 enum arm_reg_type basetype
;
2333 struct reg_entry
*basereg
;
2334 struct reg_entry mybasereg
;
2335 struct neon_type ntype
;
2336 struct neon_typed_alias typeinfo
;
2337 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2340 typeinfo
.defined
= 0;
2341 typeinfo
.eltype
.type
= NT_invtype
;
2342 typeinfo
.eltype
.size
= -1;
2343 typeinfo
.index
= -1;
2347 if (strncmp (p
, " .dn ", 5) == 0)
2348 basetype
= REG_TYPE_VFD
;
2349 else if (strncmp (p
, " .qn ", 5) == 0)
2350 basetype
= REG_TYPE_NQ
;
2359 basereg
= arm_reg_parse_multi (&p
);
2361 if (basereg
&& basereg
->type
!= basetype
)
2363 as_bad (_("bad type for register"));
2367 if (basereg
== NULL
)
2370 /* Try parsing as an integer. */
2371 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2372 if (exp
.X_op
!= O_constant
)
2374 as_bad (_("expression must be constant"));
2377 basereg
= &mybasereg
;
2378 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2384 typeinfo
= *basereg
->neon
;
2386 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2388 /* We got a type. */
2389 if (typeinfo
.defined
& NTA_HASTYPE
)
2391 as_bad (_("can't redefine the type of a register alias"));
2395 typeinfo
.defined
|= NTA_HASTYPE
;
2396 if (ntype
.elems
!= 1)
2398 as_bad (_("you must specify a single type only"));
2401 typeinfo
.eltype
= ntype
.el
[0];
2404 if (skip_past_char (&p
, '[') == SUCCESS
)
2407 /* We got a scalar index. */
2409 if (typeinfo
.defined
& NTA_HASINDEX
)
2411 as_bad (_("can't redefine the index of a scalar alias"));
2415 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2417 if (exp
.X_op
!= O_constant
)
2419 as_bad (_("scalar index must be constant"));
2423 typeinfo
.defined
|= NTA_HASINDEX
;
2424 typeinfo
.index
= exp
.X_add_number
;
2426 if (skip_past_char (&p
, ']') == FAIL
)
2428 as_bad (_("expecting ]"));
2433 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2434 the desired alias name, and p points to its end. If not, then
2435 the desired alias name is in the global original_case_string. */
2436 #ifdef TC_CASE_SENSITIVE
2437 namelen
= nameend
- newname
;
2439 newname
= original_case_string
;
2440 namelen
= strlen (newname
);
2443 namebuf
= xmemdup0 (newname
, namelen
);
2445 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2446 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2448 /* Insert name in all uppercase. */
2449 for (p
= namebuf
; *p
; p
++)
2452 if (strncmp (namebuf
, newname
, namelen
))
2453 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2454 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2456 /* Insert name in all lowercase. */
2457 for (p
= namebuf
; *p
; p
++)
2460 if (strncmp (namebuf
, newname
, namelen
))
2461 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2462 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2468 /* Should never be called, as .req goes between the alias and the
2469 register name, not at the beginning of the line. */
2472 s_req (int a ATTRIBUTE_UNUSED
)
2474 as_bad (_("invalid syntax for .req directive"));
2478 s_dn (int a ATTRIBUTE_UNUSED
)
2480 as_bad (_("invalid syntax for .dn directive"));
2484 s_qn (int a ATTRIBUTE_UNUSED
)
2486 as_bad (_("invalid syntax for .qn directive"));
2489 /* The .unreq directive deletes an alias which was previously defined
2490 by .req. For example:
2496 s_unreq (int a ATTRIBUTE_UNUSED
)
2501 name
= input_line_pointer
;
2503 while (*input_line_pointer
!= 0
2504 && *input_line_pointer
!= ' '
2505 && *input_line_pointer
!= '\n')
2506 ++input_line_pointer
;
2508 saved_char
= *input_line_pointer
;
2509 *input_line_pointer
= 0;
2512 as_bad (_("invalid syntax for .unreq directive"));
2515 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2519 as_bad (_("unknown register alias '%s'"), name
);
2520 else if (reg
->builtin
)
2521 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2528 hash_delete (arm_reg_hsh
, name
, FALSE
);
2529 free ((char *) reg
->name
);
2534 /* Also locate the all upper case and all lower case versions.
2535 Do not complain if we cannot find one or the other as it
2536 was probably deleted above. */
2538 nbuf
= strdup (name
);
2539 for (p
= nbuf
; *p
; p
++)
2541 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2544 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2545 free ((char *) reg
->name
);
2551 for (p
= nbuf
; *p
; p
++)
2553 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2556 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2557 free ((char *) reg
->name
);
2567 *input_line_pointer
= saved_char
;
2568 demand_empty_rest_of_line ();
2571 /* Directives: Instruction set selection. */
2574 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2575 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2576 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2577 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2579 /* Create a new mapping symbol for the transition to STATE. */
2582 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2585 const char * symname
;
2592 type
= BSF_NO_FLAGS
;
2596 type
= BSF_NO_FLAGS
;
2600 type
= BSF_NO_FLAGS
;
2606 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2607 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2612 THUMB_SET_FUNC (symbolP
, 0);
2613 ARM_SET_THUMB (symbolP
, 0);
2614 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2618 THUMB_SET_FUNC (symbolP
, 1);
2619 ARM_SET_THUMB (symbolP
, 1);
2620 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2628 /* Save the mapping symbols for future reference. Also check that
2629 we do not place two mapping symbols at the same offset within a
2630 frag. We'll handle overlap between frags in
2631 check_mapping_symbols.
2633 If .fill or other data filling directive generates zero sized data,
2634 the mapping symbol for the following code will have the same value
2635 as the one generated for the data filling directive. In this case,
2636 we replace the old symbol with the new one at the same address. */
2639 if (frag
->tc_frag_data
.first_map
!= NULL
)
2641 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2642 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2644 frag
->tc_frag_data
.first_map
= symbolP
;
2646 if (frag
->tc_frag_data
.last_map
!= NULL
)
2648 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2649 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2650 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2652 frag
->tc_frag_data
.last_map
= symbolP
;
2655 /* We must sometimes convert a region marked as code to data during
2656 code alignment, if an odd number of bytes have to be padded. The
2657 code mapping symbol is pushed to an aligned address. */
2660 insert_data_mapping_symbol (enum mstate state
,
2661 valueT value
, fragS
*frag
, offsetT bytes
)
2663 /* If there was already a mapping symbol, remove it. */
2664 if (frag
->tc_frag_data
.last_map
!= NULL
2665 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2667 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2671 know (frag
->tc_frag_data
.first_map
== symp
);
2672 frag
->tc_frag_data
.first_map
= NULL
;
2674 frag
->tc_frag_data
.last_map
= NULL
;
2675 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2678 make_mapping_symbol (MAP_DATA
, value
, frag
);
2679 make_mapping_symbol (state
, value
+ bytes
, frag
);
2682 static void mapping_state_2 (enum mstate state
, int max_chars
);
2684 /* Set the mapping state to STATE. Only call this when about to
2685 emit some STATE bytes to the file. */
2687 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2689 mapping_state (enum mstate state
)
2691 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2693 if (mapstate
== state
)
2694 /* The mapping symbol has already been emitted.
2695 There is nothing else to do. */
2698 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2700 All ARM instructions require 4-byte alignment.
2701 (Almost) all Thumb instructions require 2-byte alignment.
2703 When emitting instructions into any section, mark the section
2706 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2707 but themselves require 2-byte alignment; this applies to some
2708 PC- relative forms. However, these cases will invovle implicit
2709 literal pool generation or an explicit .align >=2, both of
2710 which will cause the section to me marked with sufficient
2711 alignment. Thus, we don't handle those cases here. */
2712 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2714 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2715 /* This case will be evaluated later. */
2718 mapping_state_2 (state
, 0);
2721 /* Same as mapping_state, but MAX_CHARS bytes have already been
2722 allocated. Put the mapping symbol that far back. */
2725 mapping_state_2 (enum mstate state
, int max_chars
)
2727 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2729 if (!SEG_NORMAL (now_seg
))
2732 if (mapstate
== state
)
2733 /* The mapping symbol has already been emitted.
2734 There is nothing else to do. */
2737 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2738 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2740 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2741 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2744 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2747 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2748 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2752 #define mapping_state(x) ((void)0)
2753 #define mapping_state_2(x, y) ((void)0)
2756 /* Find the real, Thumb encoded start of a Thumb function. */
2760 find_real_start (symbolS
* symbolP
)
2763 const char * name
= S_GET_NAME (symbolP
);
2764 symbolS
* new_target
;
2766 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2767 #define STUB_NAME ".real_start_of"
2772 /* The compiler may generate BL instructions to local labels because
2773 it needs to perform a branch to a far away location. These labels
2774 do not have a corresponding ".real_start_of" label. We check
2775 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2776 the ".real_start_of" convention for nonlocal branches. */
2777 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2780 real_start
= concat (STUB_NAME
, name
, NULL
);
2781 new_target
= symbol_find (real_start
);
2784 if (new_target
== NULL
)
2786 as_warn (_("Failed to find real start of function: %s\n"), name
);
2787 new_target
= symbolP
;
2795 opcode_select (int width
)
2802 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2803 as_bad (_("selected processor does not support THUMB opcodes"));
2806 /* No need to force the alignment, since we will have been
2807 coming from ARM mode, which is word-aligned. */
2808 record_alignment (now_seg
, 1);
2815 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2816 as_bad (_("selected processor does not support ARM opcodes"));
2821 frag_align (2, 0, 0);
2823 record_alignment (now_seg
, 1);
2828 as_bad (_("invalid instruction size selected (%d)"), width
);
2833 s_arm (int ignore ATTRIBUTE_UNUSED
)
2836 demand_empty_rest_of_line ();
2840 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2843 demand_empty_rest_of_line ();
2847 s_code (int unused ATTRIBUTE_UNUSED
)
2851 temp
= get_absolute_expression ();
2856 opcode_select (temp
);
2860 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2865 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2867 /* If we are not already in thumb mode go into it, EVEN if
2868 the target processor does not support thumb instructions.
2869 This is used by gcc/config/arm/lib1funcs.asm for example
2870 to compile interworking support functions even if the
2871 target processor should not support interworking. */
2875 record_alignment (now_seg
, 1);
2878 demand_empty_rest_of_line ();
2882 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2886 /* The following label is the name/address of the start of a Thumb function.
2887 We need to know this for the interworking support. */
2888 label_is_thumb_function_name
= TRUE
;
2891 /* Perform a .set directive, but also mark the alias as
2892 being a thumb function. */
2895 s_thumb_set (int equiv
)
2897 /* XXX the following is a duplicate of the code for s_set() in read.c
2898 We cannot just call that code as we need to get at the symbol that
2905 /* Especial apologies for the random logic:
2906 This just grew, and could be parsed much more simply!
2908 delim
= get_symbol_name (& name
);
2909 end_name
= input_line_pointer
;
2910 (void) restore_line_pointer (delim
);
2912 if (*input_line_pointer
!= ',')
2915 as_bad (_("expected comma after name \"%s\""), name
);
2917 ignore_rest_of_line ();
2921 input_line_pointer
++;
2924 if (name
[0] == '.' && name
[1] == '\0')
2926 /* XXX - this should not happen to .thumb_set. */
2930 if ((symbolP
= symbol_find (name
)) == NULL
2931 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2934 /* When doing symbol listings, play games with dummy fragments living
2935 outside the normal fragment chain to record the file and line info
2937 if (listing
& LISTING_SYMBOLS
)
2939 extern struct list_info_struct
* listing_tail
;
2940 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2942 memset (dummy_frag
, 0, sizeof (fragS
));
2943 dummy_frag
->fr_type
= rs_fill
;
2944 dummy_frag
->line
= listing_tail
;
2945 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2946 dummy_frag
->fr_symbol
= symbolP
;
2950 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2953 /* "set" symbols are local unless otherwise specified. */
2954 SF_SET_LOCAL (symbolP
);
2955 #endif /* OBJ_COFF */
2956 } /* Make a new symbol. */
2958 symbol_table_insert (symbolP
);
2963 && S_IS_DEFINED (symbolP
)
2964 && S_GET_SEGMENT (symbolP
) != reg_section
)
2965 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2967 pseudo_set (symbolP
);
2969 demand_empty_rest_of_line ();
2971 /* XXX Now we come to the Thumb specific bit of code. */
2973 THUMB_SET_FUNC (symbolP
, 1);
2974 ARM_SET_THUMB (symbolP
, 1);
2975 #if defined OBJ_ELF || defined OBJ_COFF
2976 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2980 /* Directives: Mode selection. */
2982 /* .syntax [unified|divided] - choose the new unified syntax
2983 (same for Arm and Thumb encoding, modulo slight differences in what
2984 can be represented) or the old divergent syntax for each mode. */
2986 s_syntax (int unused ATTRIBUTE_UNUSED
)
2990 delim
= get_symbol_name (& name
);
2992 if (!strcasecmp (name
, "unified"))
2993 unified_syntax
= TRUE
;
2994 else if (!strcasecmp (name
, "divided"))
2995 unified_syntax
= FALSE
;
2998 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3001 (void) restore_line_pointer (delim
);
3002 demand_empty_rest_of_line ();
3005 /* Directives: sectioning and alignment. */
3008 s_bss (int ignore ATTRIBUTE_UNUSED
)
3010 /* We don't support putting frags in the BSS segment, we fake it by
3011 marking in_bss, then looking at s_skip for clues. */
3012 subseg_set (bss_section
, 0);
3013 demand_empty_rest_of_line ();
3015 #ifdef md_elf_section_change_hook
3016 md_elf_section_change_hook ();
3021 s_even (int ignore ATTRIBUTE_UNUSED
)
3023 /* Never make frag if expect extra pass. */
3025 frag_align (1, 0, 0);
3027 record_alignment (now_seg
, 1);
3029 demand_empty_rest_of_line ();
3032 /* Directives: CodeComposer Studio. */
3034 /* .ref (for CodeComposer Studio syntax only). */
3036 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3038 if (codecomposer_syntax
)
3039 ignore_rest_of_line ();
3041 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3044 /* If name is not NULL, then it is used for marking the beginning of a
3045 function, whereas if it is NULL then it means the function end. */
3047 asmfunc_debug (const char * name
)
3049 static const char * last_name
= NULL
;
3053 gas_assert (last_name
== NULL
);
3056 if (debug_type
== DEBUG_STABS
)
3057 stabs_generate_asm_func (name
, name
);
3061 gas_assert (last_name
!= NULL
);
3063 if (debug_type
== DEBUG_STABS
)
3064 stabs_generate_asm_endfunc (last_name
, last_name
);
3071 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3073 if (codecomposer_syntax
)
3075 switch (asmfunc_state
)
3077 case OUTSIDE_ASMFUNC
:
3078 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3081 case WAITING_ASMFUNC_NAME
:
3082 as_bad (_(".asmfunc repeated."));
3085 case WAITING_ENDASMFUNC
:
3086 as_bad (_(".asmfunc without function."));
3089 demand_empty_rest_of_line ();
3092 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3096 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3098 if (codecomposer_syntax
)
3100 switch (asmfunc_state
)
3102 case OUTSIDE_ASMFUNC
:
3103 as_bad (_(".endasmfunc without a .asmfunc."));
3106 case WAITING_ASMFUNC_NAME
:
3107 as_bad (_(".endasmfunc without function."));
3110 case WAITING_ENDASMFUNC
:
3111 asmfunc_state
= OUTSIDE_ASMFUNC
;
3112 asmfunc_debug (NULL
);
3115 demand_empty_rest_of_line ();
3118 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3122 s_ccs_def (int name
)
3124 if (codecomposer_syntax
)
3127 as_bad (_(".def pseudo-op only available with -mccs flag."));
3130 /* Directives: Literal pools. */
3132 static literal_pool
*
3133 find_literal_pool (void)
3135 literal_pool
* pool
;
3137 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3139 if (pool
->section
== now_seg
3140 && pool
->sub_section
== now_subseg
)
3147 static literal_pool
*
3148 find_or_make_literal_pool (void)
3150 /* Next literal pool ID number. */
3151 static unsigned int latest_pool_num
= 1;
3152 literal_pool
* pool
;
3154 pool
= find_literal_pool ();
3158 /* Create a new pool. */
3159 pool
= XNEW (literal_pool
);
3163 pool
->next_free_entry
= 0;
3164 pool
->section
= now_seg
;
3165 pool
->sub_section
= now_subseg
;
3166 pool
->next
= list_of_pools
;
3167 pool
->symbol
= NULL
;
3168 pool
->alignment
= 2;
3170 /* Add it to the list. */
3171 list_of_pools
= pool
;
3174 /* New pools, and emptied pools, will have a NULL symbol. */
3175 if (pool
->symbol
== NULL
)
3177 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3178 (valueT
) 0, &zero_address_frag
);
3179 pool
->id
= latest_pool_num
++;
3186 /* Add the literal in the global 'inst'
3187 structure to the relevant literal pool. */
3190 add_to_lit_pool (unsigned int nbytes
)
3192 #define PADDING_SLOT 0x1
3193 #define LIT_ENTRY_SIZE_MASK 0xFF
3194 literal_pool
* pool
;
3195 unsigned int entry
, pool_size
= 0;
3196 bfd_boolean padding_slot_p
= FALSE
;
3202 imm1
= inst
.operands
[1].imm
;
3203 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3204 : inst
.reloc
.exp
.X_unsigned
? 0
3205 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3206 if (target_big_endian
)
3209 imm2
= inst
.operands
[1].imm
;
3213 pool
= find_or_make_literal_pool ();
3215 /* Check if this literal value is already in the pool. */
3216 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3220 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3221 && (inst
.reloc
.exp
.X_op
== O_constant
)
3222 && (pool
->literals
[entry
].X_add_number
3223 == inst
.reloc
.exp
.X_add_number
)
3224 && (pool
->literals
[entry
].X_md
== nbytes
)
3225 && (pool
->literals
[entry
].X_unsigned
3226 == inst
.reloc
.exp
.X_unsigned
))
3229 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3230 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3231 && (pool
->literals
[entry
].X_add_number
3232 == inst
.reloc
.exp
.X_add_number
)
3233 && (pool
->literals
[entry
].X_add_symbol
3234 == inst
.reloc
.exp
.X_add_symbol
)
3235 && (pool
->literals
[entry
].X_op_symbol
3236 == inst
.reloc
.exp
.X_op_symbol
)
3237 && (pool
->literals
[entry
].X_md
== nbytes
))
3240 else if ((nbytes
== 8)
3241 && !(pool_size
& 0x7)
3242 && ((entry
+ 1) != pool
->next_free_entry
)
3243 && (pool
->literals
[entry
].X_op
== O_constant
)
3244 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3245 && (pool
->literals
[entry
].X_unsigned
3246 == inst
.reloc
.exp
.X_unsigned
)
3247 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3248 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3249 && (pool
->literals
[entry
+ 1].X_unsigned
3250 == inst
.reloc
.exp
.X_unsigned
))
3253 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3254 if (padding_slot_p
&& (nbytes
== 4))
3260 /* Do we need to create a new entry? */
3261 if (entry
== pool
->next_free_entry
)
3263 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3265 inst
.error
= _("literal pool overflow");
3271 /* For 8-byte entries, we align to an 8-byte boundary,
3272 and split it into two 4-byte entries, because on 32-bit
3273 host, 8-byte constants are treated as big num, thus
3274 saved in "generic_bignum" which will be overwritten
3275 by later assignments.
3277 We also need to make sure there is enough space for
3280 We also check to make sure the literal operand is a
3282 if (!(inst
.reloc
.exp
.X_op
== O_constant
3283 || inst
.reloc
.exp
.X_op
== O_big
))
3285 inst
.error
= _("invalid type for literal pool");
3288 else if (pool_size
& 0x7)
3290 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3292 inst
.error
= _("literal pool overflow");
3296 pool
->literals
[entry
] = inst
.reloc
.exp
;
3297 pool
->literals
[entry
].X_op
= O_constant
;
3298 pool
->literals
[entry
].X_add_number
= 0;
3299 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3300 pool
->next_free_entry
+= 1;
3303 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3305 inst
.error
= _("literal pool overflow");
3309 pool
->literals
[entry
] = inst
.reloc
.exp
;
3310 pool
->literals
[entry
].X_op
= O_constant
;
3311 pool
->literals
[entry
].X_add_number
= imm1
;
3312 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3313 pool
->literals
[entry
++].X_md
= 4;
3314 pool
->literals
[entry
] = inst
.reloc
.exp
;
3315 pool
->literals
[entry
].X_op
= O_constant
;
3316 pool
->literals
[entry
].X_add_number
= imm2
;
3317 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3318 pool
->literals
[entry
].X_md
= 4;
3319 pool
->alignment
= 3;
3320 pool
->next_free_entry
+= 1;
3324 pool
->literals
[entry
] = inst
.reloc
.exp
;
3325 pool
->literals
[entry
].X_md
= 4;
3329 /* PR ld/12974: Record the location of the first source line to reference
3330 this entry in the literal pool. If it turns out during linking that the
3331 symbol does not exist we will be able to give an accurate line number for
3332 the (first use of the) missing reference. */
3333 if (debug_type
== DEBUG_DWARF2
)
3334 dwarf2_where (pool
->locs
+ entry
);
3336 pool
->next_free_entry
+= 1;
3338 else if (padding_slot_p
)
3340 pool
->literals
[entry
] = inst
.reloc
.exp
;
3341 pool
->literals
[entry
].X_md
= nbytes
;
3344 inst
.reloc
.exp
.X_op
= O_symbol
;
3345 inst
.reloc
.exp
.X_add_number
= pool_size
;
3346 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3352 tc_start_label_without_colon (void)
3354 bfd_boolean ret
= TRUE
;
3356 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3358 const char *label
= input_line_pointer
;
3360 while (!is_end_of_line
[(int) label
[-1]])
3365 as_bad (_("Invalid label '%s'"), label
);
3369 asmfunc_debug (label
);
3371 asmfunc_state
= WAITING_ENDASMFUNC
;
3377 /* Can't use symbol_new here, so have to create a symbol and then at
3378 a later date assign it a value. That's what these functions do. */
3381 symbol_locate (symbolS
* symbolP
,
3382 const char * name
, /* It is copied, the caller can modify. */
3383 segT segment
, /* Segment identifier (SEG_<something>). */
3384 valueT valu
, /* Symbol value. */
3385 fragS
* frag
) /* Associated fragment. */
3388 char * preserved_copy_of_name
;
3390 name_length
= strlen (name
) + 1; /* +1 for \0. */
3391 obstack_grow (¬es
, name
, name_length
);
3392 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3394 #ifdef tc_canonicalize_symbol_name
3395 preserved_copy_of_name
=
3396 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3399 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3401 S_SET_SEGMENT (symbolP
, segment
);
3402 S_SET_VALUE (symbolP
, valu
);
3403 symbol_clear_list_pointers (symbolP
);
3405 symbol_set_frag (symbolP
, frag
);
3407 /* Link to end of symbol chain. */
3409 extern int symbol_table_frozen
;
3411 if (symbol_table_frozen
)
3415 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3417 obj_symbol_new_hook (symbolP
);
3419 #ifdef tc_symbol_new_hook
3420 tc_symbol_new_hook (symbolP
);
3424 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3425 #endif /* DEBUG_SYMS */
3429 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3432 literal_pool
* pool
;
3435 pool
= find_literal_pool ();
3437 || pool
->symbol
== NULL
3438 || pool
->next_free_entry
== 0)
3441 /* Align pool as you have word accesses.
3442 Only make a frag if we have to. */
3444 frag_align (pool
->alignment
, 0, 0);
3446 record_alignment (now_seg
, 2);
3449 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3450 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3452 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3454 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3455 (valueT
) frag_now_fix (), frag_now
);
3456 symbol_table_insert (pool
->symbol
);
3458 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3460 #if defined OBJ_COFF || defined OBJ_ELF
3461 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3464 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3467 if (debug_type
== DEBUG_DWARF2
)
3468 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3470 /* First output the expression in the instruction to the pool. */
3471 emit_expr (&(pool
->literals
[entry
]),
3472 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3475 /* Mark the pool as empty. */
3476 pool
->next_free_entry
= 0;
3477 pool
->symbol
= NULL
;
3481 /* Forward declarations for functions below, in the MD interface
3483 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3484 static valueT
create_unwind_entry (int);
3485 static void start_unwind_section (const segT
, int);
3486 static void add_unwind_opcode (valueT
, int);
3487 static void flush_pending_unwind (void);
3489 /* Directives: Data. */
3492 s_arm_elf_cons (int nbytes
)
3496 #ifdef md_flush_pending_output
3497 md_flush_pending_output ();
3500 if (is_it_end_of_statement ())
3502 demand_empty_rest_of_line ();
3506 #ifdef md_cons_align
3507 md_cons_align (nbytes
);
3510 mapping_state (MAP_DATA
);
3514 char *base
= input_line_pointer
;
3518 if (exp
.X_op
!= O_symbol
)
3519 emit_expr (&exp
, (unsigned int) nbytes
);
3522 char *before_reloc
= input_line_pointer
;
3523 reloc
= parse_reloc (&input_line_pointer
);
3526 as_bad (_("unrecognized relocation suffix"));
3527 ignore_rest_of_line ();
3530 else if (reloc
== BFD_RELOC_UNUSED
)
3531 emit_expr (&exp
, (unsigned int) nbytes
);
3534 reloc_howto_type
*howto
= (reloc_howto_type
*)
3535 bfd_reloc_type_lookup (stdoutput
,
3536 (bfd_reloc_code_real_type
) reloc
);
3537 int size
= bfd_get_reloc_size (howto
);
3539 if (reloc
== BFD_RELOC_ARM_PLT32
)
3541 as_bad (_("(plt) is only valid on branch targets"));
3542 reloc
= BFD_RELOC_UNUSED
;
3547 as_bad (_("%s relocations do not fit in %d bytes"),
3548 howto
->name
, nbytes
);
3551 /* We've parsed an expression stopping at O_symbol.
3552 But there may be more expression left now that we
3553 have parsed the relocation marker. Parse it again.
3554 XXX Surely there is a cleaner way to do this. */
3555 char *p
= input_line_pointer
;
3557 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3559 memcpy (save_buf
, base
, input_line_pointer
- base
);
3560 memmove (base
+ (input_line_pointer
- before_reloc
),
3561 base
, before_reloc
- base
);
3563 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3565 memcpy (base
, save_buf
, p
- base
);
3567 offset
= nbytes
- size
;
3568 p
= frag_more (nbytes
);
3569 memset (p
, 0, nbytes
);
3570 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3571 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3577 while (*input_line_pointer
++ == ',');
3579 /* Put terminator back into stream. */
3580 input_line_pointer
--;
3581 demand_empty_rest_of_line ();
3584 /* Emit an expression containing a 32-bit thumb instruction.
3585 Implementation based on put_thumb32_insn. */
3588 emit_thumb32_expr (expressionS
* exp
)
3590 expressionS exp_high
= *exp
;
3592 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3593 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3594 exp
->X_add_number
&= 0xffff;
3595 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3598 /* Guess the instruction size based on the opcode. */
3601 thumb_insn_size (int opcode
)
3603 if ((unsigned int) opcode
< 0xe800u
)
3605 else if ((unsigned int) opcode
>= 0xe8000000u
)
3612 emit_insn (expressionS
*exp
, int nbytes
)
3616 if (exp
->X_op
== O_constant
)
3621 size
= thumb_insn_size (exp
->X_add_number
);
3625 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3627 as_bad (_(".inst.n operand too big. "\
3628 "Use .inst.w instead"));
3633 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3634 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3636 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3638 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3639 emit_thumb32_expr (exp
);
3641 emit_expr (exp
, (unsigned int) size
);
3643 it_fsm_post_encode ();
3647 as_bad (_("cannot determine Thumb instruction size. " \
3648 "Use .inst.n/.inst.w instead"));
3651 as_bad (_("constant expression required"));
3656 /* Like s_arm_elf_cons but do not use md_cons_align and
3657 set the mapping state to MAP_ARM/MAP_THUMB. */
3660 s_arm_elf_inst (int nbytes
)
3662 if (is_it_end_of_statement ())
3664 demand_empty_rest_of_line ();
3668 /* Calling mapping_state () here will not change ARM/THUMB,
3669 but will ensure not to be in DATA state. */
3672 mapping_state (MAP_THUMB
);
3677 as_bad (_("width suffixes are invalid in ARM mode"));
3678 ignore_rest_of_line ();
3684 mapping_state (MAP_ARM
);
3693 if (! emit_insn (& exp
, nbytes
))
3695 ignore_rest_of_line ();
3699 while (*input_line_pointer
++ == ',');
3701 /* Put terminator back into stream. */
3702 input_line_pointer
--;
3703 demand_empty_rest_of_line ();
3706 /* Parse a .rel31 directive. */
3709 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3716 if (*input_line_pointer
== '1')
3717 highbit
= 0x80000000;
3718 else if (*input_line_pointer
!= '0')
3719 as_bad (_("expected 0 or 1"));
3721 input_line_pointer
++;
3722 if (*input_line_pointer
!= ',')
3723 as_bad (_("missing comma"));
3724 input_line_pointer
++;
3726 #ifdef md_flush_pending_output
3727 md_flush_pending_output ();
3730 #ifdef md_cons_align
3734 mapping_state (MAP_DATA
);
3739 md_number_to_chars (p
, highbit
, 4);
3740 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3741 BFD_RELOC_ARM_PREL31
);
3743 demand_empty_rest_of_line ();
3746 /* Directives: AEABI stack-unwind tables. */
3748 /* Parse an unwind_fnstart directive. Simply records the current location. */
3751 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3753 demand_empty_rest_of_line ();
3754 if (unwind
.proc_start
)
3756 as_bad (_("duplicate .fnstart directive"));
3760 /* Mark the start of the function. */
3761 unwind
.proc_start
= expr_build_dot ();
3763 /* Reset the rest of the unwind info. */
3764 unwind
.opcode_count
= 0;
3765 unwind
.table_entry
= NULL
;
3766 unwind
.personality_routine
= NULL
;
3767 unwind
.personality_index
= -1;
3768 unwind
.frame_size
= 0;
3769 unwind
.fp_offset
= 0;
3770 unwind
.fp_reg
= REG_SP
;
3772 unwind
.sp_restored
= 0;
3776 /* Parse a handlerdata directive. Creates the exception handling table entry
3777 for the function. */
3780 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3782 demand_empty_rest_of_line ();
3783 if (!unwind
.proc_start
)
3784 as_bad (MISSING_FNSTART
);
3786 if (unwind
.table_entry
)
3787 as_bad (_("duplicate .handlerdata directive"));
3789 create_unwind_entry (1);
3792 /* Parse an unwind_fnend directive. Generates the index table entry. */
3795 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3800 unsigned int marked_pr_dependency
;
3802 demand_empty_rest_of_line ();
3804 if (!unwind
.proc_start
)
3806 as_bad (_(".fnend directive without .fnstart"));
3810 /* Add eh table entry. */
3811 if (unwind
.table_entry
== NULL
)
3812 val
= create_unwind_entry (0);
3816 /* Add index table entry. This is two words. */
3817 start_unwind_section (unwind
.saved_seg
, 1);
3818 frag_align (2, 0, 0);
3819 record_alignment (now_seg
, 2);
3821 ptr
= frag_more (8);
3823 where
= frag_now_fix () - 8;
3825 /* Self relative offset of the function start. */
3826 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3827 BFD_RELOC_ARM_PREL31
);
3829 /* Indicate dependency on EHABI-defined personality routines to the
3830 linker, if it hasn't been done already. */
3831 marked_pr_dependency
3832 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3833 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3834 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3836 static const char *const name
[] =
3838 "__aeabi_unwind_cpp_pr0",
3839 "__aeabi_unwind_cpp_pr1",
3840 "__aeabi_unwind_cpp_pr2"
3842 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3843 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3844 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3845 |= 1 << unwind
.personality_index
;
3849 /* Inline exception table entry. */
3850 md_number_to_chars (ptr
+ 4, val
, 4);
3852 /* Self relative offset of the table entry. */
3853 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3854 BFD_RELOC_ARM_PREL31
);
3856 /* Restore the original section. */
3857 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3859 unwind
.proc_start
= NULL
;
3863 /* Parse an unwind_cantunwind directive. */
3866 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3868 demand_empty_rest_of_line ();
3869 if (!unwind
.proc_start
)
3870 as_bad (MISSING_FNSTART
);
3872 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3873 as_bad (_("personality routine specified for cantunwind frame"));
3875 unwind
.personality_index
= -2;
3879 /* Parse a personalityindex directive. */
3882 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3886 if (!unwind
.proc_start
)
3887 as_bad (MISSING_FNSTART
);
3889 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3890 as_bad (_("duplicate .personalityindex directive"));
3894 if (exp
.X_op
!= O_constant
3895 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3897 as_bad (_("bad personality routine number"));
3898 ignore_rest_of_line ();
3902 unwind
.personality_index
= exp
.X_add_number
;
3904 demand_empty_rest_of_line ();
3908 /* Parse a personality directive. */
3911 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3915 if (!unwind
.proc_start
)
3916 as_bad (MISSING_FNSTART
);
3918 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3919 as_bad (_("duplicate .personality directive"));
3921 c
= get_symbol_name (& name
);
3922 p
= input_line_pointer
;
3924 ++ input_line_pointer
;
3925 unwind
.personality_routine
= symbol_find_or_make (name
);
3927 demand_empty_rest_of_line ();
3931 /* Parse a directive saving core registers. */
3934 s_arm_unwind_save_core (void)
3940 range
= parse_reg_list (&input_line_pointer
);
3943 as_bad (_("expected register list"));
3944 ignore_rest_of_line ();
3948 demand_empty_rest_of_line ();
3950 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3951 into .unwind_save {..., sp...}. We aren't bothered about the value of
3952 ip because it is clobbered by calls. */
3953 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3954 && (range
& 0x3000) == 0x1000)
3956 unwind
.opcode_count
--;
3957 unwind
.sp_restored
= 0;
3958 range
= (range
| 0x2000) & ~0x1000;
3959 unwind
.pending_offset
= 0;
3965 /* See if we can use the short opcodes. These pop a block of up to 8
3966 registers starting with r4, plus maybe r14. */
3967 for (n
= 0; n
< 8; n
++)
3969 /* Break at the first non-saved register. */
3970 if ((range
& (1 << (n
+ 4))) == 0)
3973 /* See if there are any other bits set. */
3974 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3976 /* Use the long form. */
3977 op
= 0x8000 | ((range
>> 4) & 0xfff);
3978 add_unwind_opcode (op
, 2);
3982 /* Use the short form. */
3984 op
= 0xa8; /* Pop r14. */
3986 op
= 0xa0; /* Do not pop r14. */
3988 add_unwind_opcode (op
, 1);
3995 op
= 0xb100 | (range
& 0xf);
3996 add_unwind_opcode (op
, 2);
3999 /* Record the number of bytes pushed. */
4000 for (n
= 0; n
< 16; n
++)
4002 if (range
& (1 << n
))
4003 unwind
.frame_size
+= 4;
4008 /* Parse a directive saving FPA registers. */
4011 s_arm_unwind_save_fpa (int reg
)
4017 /* Get Number of registers to transfer. */
4018 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4021 exp
.X_op
= O_illegal
;
4023 if (exp
.X_op
!= O_constant
)
4025 as_bad (_("expected , <constant>"));
4026 ignore_rest_of_line ();
4030 num_regs
= exp
.X_add_number
;
4032 if (num_regs
< 1 || num_regs
> 4)
4034 as_bad (_("number of registers must be in the range [1:4]"));
4035 ignore_rest_of_line ();
4039 demand_empty_rest_of_line ();
4044 op
= 0xb4 | (num_regs
- 1);
4045 add_unwind_opcode (op
, 1);
4050 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4051 add_unwind_opcode (op
, 2);
4053 unwind
.frame_size
+= num_regs
* 12;
4057 /* Parse a directive saving VFP registers for ARMv6 and above. */
4060 s_arm_unwind_save_vfp_armv6 (void)
4065 int num_vfpv3_regs
= 0;
4066 int num_regs_below_16
;
4068 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4071 as_bad (_("expected register list"));
4072 ignore_rest_of_line ();
4076 demand_empty_rest_of_line ();
4078 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4079 than FSTMX/FLDMX-style ones). */
4081 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4083 num_vfpv3_regs
= count
;
4084 else if (start
+ count
> 16)
4085 num_vfpv3_regs
= start
+ count
- 16;
4087 if (num_vfpv3_regs
> 0)
4089 int start_offset
= start
> 16 ? start
- 16 : 0;
4090 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4091 add_unwind_opcode (op
, 2);
4094 /* Generate opcode for registers numbered in the range 0 .. 15. */
4095 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4096 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4097 if (num_regs_below_16
> 0)
4099 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4100 add_unwind_opcode (op
, 2);
4103 unwind
.frame_size
+= count
* 8;
4107 /* Parse a directive saving VFP registers for pre-ARMv6. */
4110 s_arm_unwind_save_vfp (void)
4116 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4119 as_bad (_("expected register list"));
4120 ignore_rest_of_line ();
4124 demand_empty_rest_of_line ();
4129 op
= 0xb8 | (count
- 1);
4130 add_unwind_opcode (op
, 1);
4135 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4136 add_unwind_opcode (op
, 2);
4138 unwind
.frame_size
+= count
* 8 + 4;
4142 /* Parse a directive saving iWMMXt data registers. */
4145 s_arm_unwind_save_mmxwr (void)
4153 if (*input_line_pointer
== '{')
4154 input_line_pointer
++;
4158 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4162 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4167 as_tsktsk (_("register list not in ascending order"));
4170 if (*input_line_pointer
== '-')
4172 input_line_pointer
++;
4173 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4176 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4179 else if (reg
>= hi_reg
)
4181 as_bad (_("bad register range"));
4184 for (; reg
< hi_reg
; reg
++)
4188 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4190 skip_past_char (&input_line_pointer
, '}');
4192 demand_empty_rest_of_line ();
4194 /* Generate any deferred opcodes because we're going to be looking at
4196 flush_pending_unwind ();
4198 for (i
= 0; i
< 16; i
++)
4200 if (mask
& (1 << i
))
4201 unwind
.frame_size
+= 8;
4204 /* Attempt to combine with a previous opcode. We do this because gcc
4205 likes to output separate unwind directives for a single block of
4207 if (unwind
.opcode_count
> 0)
4209 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4210 if ((i
& 0xf8) == 0xc0)
4213 /* Only merge if the blocks are contiguous. */
4216 if ((mask
& 0xfe00) == (1 << 9))
4218 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4219 unwind
.opcode_count
--;
4222 else if (i
== 6 && unwind
.opcode_count
>= 2)
4224 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4228 op
= 0xffff << (reg
- 1);
4230 && ((mask
& op
) == (1u << (reg
- 1))))
4232 op
= (1 << (reg
+ i
+ 1)) - 1;
4233 op
&= ~((1 << reg
) - 1);
4235 unwind
.opcode_count
-= 2;
4242 /* We want to generate opcodes in the order the registers have been
4243 saved, ie. descending order. */
4244 for (reg
= 15; reg
>= -1; reg
--)
4246 /* Save registers in blocks. */
4248 || !(mask
& (1 << reg
)))
4250 /* We found an unsaved reg. Generate opcodes to save the
4257 op
= 0xc0 | (hi_reg
- 10);
4258 add_unwind_opcode (op
, 1);
4263 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4264 add_unwind_opcode (op
, 2);
4273 ignore_rest_of_line ();
4277 s_arm_unwind_save_mmxwcg (void)
4284 if (*input_line_pointer
== '{')
4285 input_line_pointer
++;
4287 skip_whitespace (input_line_pointer
);
4291 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4295 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4301 as_tsktsk (_("register list not in ascending order"));
4304 if (*input_line_pointer
== '-')
4306 input_line_pointer
++;
4307 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4310 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4313 else if (reg
>= hi_reg
)
4315 as_bad (_("bad register range"));
4318 for (; reg
< hi_reg
; reg
++)
4322 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4324 skip_past_char (&input_line_pointer
, '}');
4326 demand_empty_rest_of_line ();
4328 /* Generate any deferred opcodes because we're going to be looking at
4330 flush_pending_unwind ();
4332 for (reg
= 0; reg
< 16; reg
++)
4334 if (mask
& (1 << reg
))
4335 unwind
.frame_size
+= 4;
4338 add_unwind_opcode (op
, 2);
4341 ignore_rest_of_line ();
4345 /* Parse an unwind_save directive.
4346 If the argument is non-zero, this is a .vsave directive. */
4349 s_arm_unwind_save (int arch_v6
)
4352 struct reg_entry
*reg
;
4353 bfd_boolean had_brace
= FALSE
;
4355 if (!unwind
.proc_start
)
4356 as_bad (MISSING_FNSTART
);
4358 /* Figure out what sort of save we have. */
4359 peek
= input_line_pointer
;
4367 reg
= arm_reg_parse_multi (&peek
);
4371 as_bad (_("register expected"));
4372 ignore_rest_of_line ();
4381 as_bad (_("FPA .unwind_save does not take a register list"));
4382 ignore_rest_of_line ();
4385 input_line_pointer
= peek
;
4386 s_arm_unwind_save_fpa (reg
->number
);
4390 s_arm_unwind_save_core ();
4395 s_arm_unwind_save_vfp_armv6 ();
4397 s_arm_unwind_save_vfp ();
4400 case REG_TYPE_MMXWR
:
4401 s_arm_unwind_save_mmxwr ();
4404 case REG_TYPE_MMXWCG
:
4405 s_arm_unwind_save_mmxwcg ();
4409 as_bad (_(".unwind_save does not support this kind of register"));
4410 ignore_rest_of_line ();
4415 /* Parse an unwind_movsp directive. */
4418 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4424 if (!unwind
.proc_start
)
4425 as_bad (MISSING_FNSTART
);
4427 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4430 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4431 ignore_rest_of_line ();
4435 /* Optional constant. */
4436 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4438 if (immediate_for_directive (&offset
) == FAIL
)
4444 demand_empty_rest_of_line ();
4446 if (reg
== REG_SP
|| reg
== REG_PC
)
4448 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4452 if (unwind
.fp_reg
!= REG_SP
)
4453 as_bad (_("unexpected .unwind_movsp directive"));
4455 /* Generate opcode to restore the value. */
4457 add_unwind_opcode (op
, 1);
4459 /* Record the information for later. */
4460 unwind
.fp_reg
= reg
;
4461 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4462 unwind
.sp_restored
= 1;
4465 /* Parse an unwind_pad directive. */
4468 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4472 if (!unwind
.proc_start
)
4473 as_bad (MISSING_FNSTART
);
4475 if (immediate_for_directive (&offset
) == FAIL
)
4480 as_bad (_("stack increment must be multiple of 4"));
4481 ignore_rest_of_line ();
4485 /* Don't generate any opcodes, just record the details for later. */
4486 unwind
.frame_size
+= offset
;
4487 unwind
.pending_offset
+= offset
;
4489 demand_empty_rest_of_line ();
4492 /* Parse an unwind_setfp directive. */
4495 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4501 if (!unwind
.proc_start
)
4502 as_bad (MISSING_FNSTART
);
4504 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4505 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4508 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4510 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4512 as_bad (_("expected <reg>, <reg>"));
4513 ignore_rest_of_line ();
4517 /* Optional constant. */
4518 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4520 if (immediate_for_directive (&offset
) == FAIL
)
4526 demand_empty_rest_of_line ();
4528 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4530 as_bad (_("register must be either sp or set by a previous"
4531 "unwind_movsp directive"));
4535 /* Don't generate any opcodes, just record the information for later. */
4536 unwind
.fp_reg
= fp_reg
;
4538 if (sp_reg
== REG_SP
)
4539 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4541 unwind
.fp_offset
-= offset
;
4544 /* Parse an unwind_raw directive. */
4547 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4550 /* This is an arbitrary limit. */
4551 unsigned char op
[16];
4554 if (!unwind
.proc_start
)
4555 as_bad (MISSING_FNSTART
);
4558 if (exp
.X_op
== O_constant
4559 && skip_past_comma (&input_line_pointer
) != FAIL
)
4561 unwind
.frame_size
+= exp
.X_add_number
;
4565 exp
.X_op
= O_illegal
;
4567 if (exp
.X_op
!= O_constant
)
4569 as_bad (_("expected <offset>, <opcode>"));
4570 ignore_rest_of_line ();
4576 /* Parse the opcode. */
4581 as_bad (_("unwind opcode too long"));
4582 ignore_rest_of_line ();
4584 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4586 as_bad (_("invalid unwind opcode"));
4587 ignore_rest_of_line ();
4590 op
[count
++] = exp
.X_add_number
;
4592 /* Parse the next byte. */
4593 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4599 /* Add the opcode bytes in reverse order. */
4601 add_unwind_opcode (op
[count
], 1);
4603 demand_empty_rest_of_line ();
4607 /* Parse a .eabi_attribute directive. */
4610 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4612 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4614 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4615 attributes_set_explicitly
[tag
] = 1;
4618 /* Emit a tls fix for the symbol. */
4621 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4625 #ifdef md_flush_pending_output
4626 md_flush_pending_output ();
4629 #ifdef md_cons_align
4633 /* Since we're just labelling the code, there's no need to define a
4636 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4637 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4638 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4639 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4641 #endif /* OBJ_ELF */
4643 static void s_arm_arch (int);
4644 static void s_arm_object_arch (int);
4645 static void s_arm_cpu (int);
4646 static void s_arm_fpu (int);
4647 static void s_arm_arch_extension (int);
4652 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4659 if (exp
.X_op
== O_symbol
)
4660 exp
.X_op
= O_secrel
;
4662 emit_expr (&exp
, 4);
4664 while (*input_line_pointer
++ == ',');
4666 input_line_pointer
--;
4667 demand_empty_rest_of_line ();
4671 /* This table describes all the machine specific pseudo-ops the assembler
4672 has to support. The fields are:
4673 pseudo-op name without dot
4674 function to call to execute this pseudo-op
4675 Integer arg to pass to the function. */
4677 const pseudo_typeS md_pseudo_table
[] =
4679 /* Never called because '.req' does not start a line. */
4680 { "req", s_req
, 0 },
4681 /* Following two are likewise never called. */
4684 { "unreq", s_unreq
, 0 },
4685 { "bss", s_bss
, 0 },
4686 { "align", s_align_ptwo
, 2 },
4687 { "arm", s_arm
, 0 },
4688 { "thumb", s_thumb
, 0 },
4689 { "code", s_code
, 0 },
4690 { "force_thumb", s_force_thumb
, 0 },
4691 { "thumb_func", s_thumb_func
, 0 },
4692 { "thumb_set", s_thumb_set
, 0 },
4693 { "even", s_even
, 0 },
4694 { "ltorg", s_ltorg
, 0 },
4695 { "pool", s_ltorg
, 0 },
4696 { "syntax", s_syntax
, 0 },
4697 { "cpu", s_arm_cpu
, 0 },
4698 { "arch", s_arm_arch
, 0 },
4699 { "object_arch", s_arm_object_arch
, 0 },
4700 { "fpu", s_arm_fpu
, 0 },
4701 { "arch_extension", s_arm_arch_extension
, 0 },
4703 { "word", s_arm_elf_cons
, 4 },
4704 { "long", s_arm_elf_cons
, 4 },
4705 { "inst.n", s_arm_elf_inst
, 2 },
4706 { "inst.w", s_arm_elf_inst
, 4 },
4707 { "inst", s_arm_elf_inst
, 0 },
4708 { "rel31", s_arm_rel31
, 0 },
4709 { "fnstart", s_arm_unwind_fnstart
, 0 },
4710 { "fnend", s_arm_unwind_fnend
, 0 },
4711 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4712 { "personality", s_arm_unwind_personality
, 0 },
4713 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4714 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4715 { "save", s_arm_unwind_save
, 0 },
4716 { "vsave", s_arm_unwind_save
, 1 },
4717 { "movsp", s_arm_unwind_movsp
, 0 },
4718 { "pad", s_arm_unwind_pad
, 0 },
4719 { "setfp", s_arm_unwind_setfp
, 0 },
4720 { "unwind_raw", s_arm_unwind_raw
, 0 },
4721 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4722 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4726 /* These are used for dwarf. */
4730 /* These are used for dwarf2. */
4731 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4732 { "loc", dwarf2_directive_loc
, 0 },
4733 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4735 { "extend", float_cons
, 'x' },
4736 { "ldouble", float_cons
, 'x' },
4737 { "packed", float_cons
, 'p' },
4739 {"secrel32", pe_directive_secrel
, 0},
4742 /* These are for compatibility with CodeComposer Studio. */
4743 {"ref", s_ccs_ref
, 0},
4744 {"def", s_ccs_def
, 0},
4745 {"asmfunc", s_ccs_asmfunc
, 0},
4746 {"endasmfunc", s_ccs_endasmfunc
, 0},
4751 /* Parser functions used exclusively in instruction operands. */
4753 /* Generic immediate-value read function for use in insn parsing.
4754 STR points to the beginning of the immediate (the leading #);
4755 VAL receives the value; if the value is outside [MIN, MAX]
4756 issue an error. PREFIX_OPT is true if the immediate prefix is
4760 parse_immediate (char **str
, int *val
, int min
, int max
,
4761 bfd_boolean prefix_opt
)
4764 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4765 if (exp
.X_op
!= O_constant
)
4767 inst
.error
= _("constant expression required");
4771 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4773 inst
.error
= _("immediate value out of range");
4777 *val
= exp
.X_add_number
;
4781 /* Less-generic immediate-value read function with the possibility of loading a
4782 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4783 instructions. Puts the result directly in inst.operands[i]. */
4786 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4787 bfd_boolean allow_symbol_p
)
4790 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4793 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4795 if (exp_p
->X_op
== O_constant
)
4797 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4798 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4799 O_constant. We have to be careful not to break compilation for
4800 32-bit X_add_number, though. */
4801 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4803 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4804 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4806 inst
.operands
[i
].regisimm
= 1;
4809 else if (exp_p
->X_op
== O_big
4810 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4812 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4814 /* Bignums have their least significant bits in
4815 generic_bignum[0]. Make sure we put 32 bits in imm and
4816 32 bits in reg, in a (hopefully) portable way. */
4817 gas_assert (parts
!= 0);
4819 /* Make sure that the number is not too big.
4820 PR 11972: Bignums can now be sign-extended to the
4821 size of a .octa so check that the out of range bits
4822 are all zero or all one. */
4823 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4825 LITTLENUM_TYPE m
= -1;
4827 if (generic_bignum
[parts
* 2] != 0
4828 && generic_bignum
[parts
* 2] != m
)
4831 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4832 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4836 inst
.operands
[i
].imm
= 0;
4837 for (j
= 0; j
< parts
; j
++, idx
++)
4838 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4839 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4840 inst
.operands
[i
].reg
= 0;
4841 for (j
= 0; j
< parts
; j
++, idx
++)
4842 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4843 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4844 inst
.operands
[i
].regisimm
= 1;
4846 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4854 /* Returns the pseudo-register number of an FPA immediate constant,
4855 or FAIL if there isn't a valid constant here. */
4858 parse_fpa_immediate (char ** str
)
4860 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4866 /* First try and match exact strings, this is to guarantee
4867 that some formats will work even for cross assembly. */
4869 for (i
= 0; fp_const
[i
]; i
++)
4871 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4875 *str
+= strlen (fp_const
[i
]);
4876 if (is_end_of_line
[(unsigned char) **str
])
4882 /* Just because we didn't get a match doesn't mean that the constant
4883 isn't valid, just that it is in a format that we don't
4884 automatically recognize. Try parsing it with the standard
4885 expression routines. */
4887 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4889 /* Look for a raw floating point number. */
4890 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4891 && is_end_of_line
[(unsigned char) *save_in
])
4893 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4895 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4897 if (words
[j
] != fp_values
[i
][j
])
4901 if (j
== MAX_LITTLENUMS
)
4909 /* Try and parse a more complex expression, this will probably fail
4910 unless the code uses a floating point prefix (eg "0f"). */
4911 save_in
= input_line_pointer
;
4912 input_line_pointer
= *str
;
4913 if (expression (&exp
) == absolute_section
4914 && exp
.X_op
== O_big
4915 && exp
.X_add_number
< 0)
4917 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4919 #define X_PRECISION 5
4920 #define E_PRECISION 15L
4921 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
4923 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4925 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4927 if (words
[j
] != fp_values
[i
][j
])
4931 if (j
== MAX_LITTLENUMS
)
4933 *str
= input_line_pointer
;
4934 input_line_pointer
= save_in
;
4941 *str
= input_line_pointer
;
4942 input_line_pointer
= save_in
;
4943 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if IMM (a single-precision bit pattern) is representable in
   the 8-bit "quarter-precision" float format used by Neon/VFP immediates:
   0baBbbbbbc defgh000 00000000 00000000 -- i.e. the low 19 bits are zero
   and the exponent field matches the sign-extended "Bbbbbb" pattern.  */
static int
is_quarter_float (unsigned imm)
{
  /* Required exponent bits: 0x3e pattern when bit 29 is set,
     0x40 pattern otherwise.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;
  return (imm & 0x7e000000) == expected;
}
4958 /* Detect the presence of a floating point or integer zero constant,
4962 parse_ifimm_zero (char **in
)
4966 if (!is_immediate_prefix (**in
))
4971 /* Accept #0x0 as a synonym for #0. */
4972 if (strncmp (*in
, "0x", 2) == 0)
4975 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
4980 error_code
= atof_generic (in
, ".", EXP_CHARS
,
4981 &generic_floating_point_number
);
4984 && generic_floating_point_number
.sign
== '+'
4985 && (generic_floating_point_number
.low
4986 > generic_floating_point_number
.leader
))
4992 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4993 0baBbbbbbc defgh000 00000000 00000000.
4994 The zero and minus-zero cases need special handling, since they can't be
4995 encoded in the "quarter-precision" float format, but can nonetheless be
4996 loaded as integer constants. */
4999 parse_qfloat_immediate (char **ccp
, int *immed
)
5003 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5004 int found_fpchar
= 0;
5006 skip_past_char (&str
, '#');
5008 /* We must not accidentally parse an integer as a floating-point number. Make
5009 sure that the value we parse is not an integer by checking for special
5010 characters '.' or 'e'.
5011 FIXME: This is a horrible hack, but doing better is tricky because type
5012 information isn't in a very usable state at parse time. */
5014 skip_whitespace (fpnum
);
5016 if (strncmp (fpnum
, "0x", 2) == 0)
5020 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5021 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5031 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5033 unsigned fpword
= 0;
5036 /* Our FP word must be 32 bits (single-precision FP). */
5037 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5039 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5043 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5056 /* Shift operands. */
5059 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5062 struct asm_shift_name
5065 enum shift_kind kind
;
5068 /* Third argument to parse_shift. */
5069 enum parse_shift_mode
5071 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5072 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5073 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5074 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5075 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5078 /* Parse a <shift> specifier on an ARM data processing instruction.
5079 This has three forms:
5081 (LSL|LSR|ASL|ASR|ROR) Rs
5082 (LSL|LSR|ASL|ASR|ROR) #imm
5085 Note that ASL is assimilated to LSL in the instruction encoding, and
5086 RRX to ROR #0 (which cannot be written as such). */
5089 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5091 const struct asm_shift_name
*shift_name
;
5092 enum shift_kind shift
;
5097 for (p
= *str
; ISALPHA (*p
); p
++)
5102 inst
.error
= _("shift expression expected");
5106 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5109 if (shift_name
== NULL
)
5111 inst
.error
= _("shift expression expected");
5115 shift
= shift_name
->kind
;
5119 case NO_SHIFT_RESTRICT
:
5120 case SHIFT_IMMEDIATE
: break;
5122 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5123 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5125 inst
.error
= _("'LSL' or 'ASR' required");
5130 case SHIFT_LSL_IMMEDIATE
:
5131 if (shift
!= SHIFT_LSL
)
5133 inst
.error
= _("'LSL' required");
5138 case SHIFT_ASR_IMMEDIATE
:
5139 if (shift
!= SHIFT_ASR
)
5141 inst
.error
= _("'ASR' required");
5149 if (shift
!= SHIFT_RRX
)
5151 /* Whitespace can appear here if the next thing is a bare digit. */
5152 skip_whitespace (p
);
5154 if (mode
== NO_SHIFT_RESTRICT
5155 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5157 inst
.operands
[i
].imm
= reg
;
5158 inst
.operands
[i
].immisreg
= 1;
5160 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5163 inst
.operands
[i
].shift_kind
= shift
;
5164 inst
.operands
[i
].shifted
= 1;
5169 /* Parse a <shifter_operand> for an ARM data processing instruction:
5172 #<immediate>, <rotate>
5176 where <shift> is defined by parse_shift above, and <rotate> is a
5177 multiple of 2 between 0 and 30. Validation of immediate operands
5178 is deferred to md_apply_fix. */
5181 parse_shifter_operand (char **str
, int i
)
5186 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5188 inst
.operands
[i
].reg
= value
;
5189 inst
.operands
[i
].isreg
= 1;
5191 /* parse_shift will override this if appropriate */
5192 inst
.reloc
.exp
.X_op
= O_constant
;
5193 inst
.reloc
.exp
.X_add_number
= 0;
5195 if (skip_past_comma (str
) == FAIL
)
5198 /* Shift operation on register. */
5199 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5202 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
5205 if (skip_past_comma (str
) == SUCCESS
)
5207 /* #x, y -- ie explicit rotation by Y. */
5208 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5211 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
5213 inst
.error
= _("constant expression expected");
5217 value
= exp
.X_add_number
;
5218 if (value
< 0 || value
> 30 || value
% 2 != 0)
5220 inst
.error
= _("invalid rotation");
5223 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
5225 inst
.error
= _("invalid constant");
5229 /* Encode as specified. */
5230 inst
.operands
[i
].imm
= inst
.reloc
.exp
.X_add_number
| value
<< 7;
5234 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5235 inst
.reloc
.pc_rel
= 0;
5239 /* Group relocation information. Each entry in the table contains the
5240 textual name of the relocation as may appear in assembler source
5241 and must end with a colon.
5242 Along with this textual name are the relocation codes to be used if
5243 the corresponding instruction is an ALU instruction (ADD or SUB only),
5244 an LDR, an LDRS, or an LDC. */
5246 struct group_reloc_table_entry
5257 /* Varieties of non-ALU group relocation. */
5264 static struct group_reloc_table_entry group_reloc_table
[] =
5265 { /* Program counter relative: */
5267 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5272 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5273 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5274 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5275 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5277 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5282 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5283 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5284 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5285 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5287 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5288 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5289 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5290 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5291 /* Section base relative */
5293 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5298 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5299 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5300 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5301 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5303 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5308 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5309 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5310 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5311 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5313 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5314 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5315 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5316 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5317 /* Absolute thumb alu relocations. */
5319 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5324 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5329 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5334 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5339 /* Given the address of a pointer pointing to the textual name of a group
5340 relocation as may appear in assembler source, attempt to find its details
5341 in group_reloc_table. The pointer will be updated to the character after
5342 the trailing colon. On failure, FAIL will be returned; SUCCESS
5343 otherwise. On success, *entry will be updated to point at the relevant
5344 group_reloc_table entry. */
5347 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5350 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5352 int length
= strlen (group_reloc_table
[i
].name
);
5354 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5355 && (*str
)[length
] == ':')
5357 *out
= &group_reloc_table
[i
];
5358 *str
+= (length
+ 1);
5366 /* Parse a <shifter_operand> for an ARM data processing instruction
5367 (as for parse_shifter_operand) where group relocations are allowed:
5370 #<immediate>, <rotate>
5371 #:<group_reloc>:<expression>
5375 where <group_reloc> is one of the strings defined in group_reloc_table.
5376 The hashes are optional.
5378 Everything else is as for parse_shifter_operand. */
5380 static parse_operand_result
5381 parse_shifter_operand_group_reloc (char **str
, int i
)
5383 /* Determine if we have the sequence of characters #: or just :
5384 coming next. If we do, then we check for a group relocation.
5385 If we don't, punt the whole lot to parse_shifter_operand. */
5387 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5388 || (*str
)[0] == ':')
5390 struct group_reloc_table_entry
*entry
;
5392 if ((*str
)[0] == '#')
5397 /* Try to parse a group relocation. Anything else is an error. */
5398 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5400 inst
.error
= _("unknown group relocation");
5401 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5404 /* We now have the group relocation table entry corresponding to
5405 the name in the assembler source. Next, we parse the expression. */
5406 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
5407 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5409 /* Record the relocation type (always the ALU variant here). */
5410 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5411 gas_assert (inst
.reloc
.type
!= 0);
5413 return PARSE_OPERAND_SUCCESS
;
5416 return parse_shifter_operand (str
, i
) == SUCCESS
5417 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5419 /* Never reached. */
5422 /* Parse a Neon alignment expression. Information is written to
5423 inst.operands[i]. We assume the initial ':' has been skipped.
5425 align .imm = align << 8, .immisalign=1, .preind=0 */
5426 static parse_operand_result
5427 parse_neon_alignment (char **str
, int i
)
5432 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5434 if (exp
.X_op
!= O_constant
)
5436 inst
.error
= _("alignment must be constant");
5437 return PARSE_OPERAND_FAIL
;
5440 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5441 inst
.operands
[i
].immisalign
= 1;
5442 /* Alignments are not pre-indexes. */
5443 inst
.operands
[i
].preind
= 0;
5446 return PARSE_OPERAND_SUCCESS
;
5449 /* Parse all forms of an ARM address expression. Information is written
5450 to inst.operands[i] and/or inst.reloc.
5452 Preindexed addressing (.preind=1):
5454 [Rn, #offset] .reg=Rn .reloc.exp=offset
5455 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5456 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5457 .shift_kind=shift .reloc.exp=shift_imm
5459 These three may have a trailing ! which causes .writeback to be set also.
5461 Postindexed addressing (.postind=1, .writeback=1):
5463 [Rn], #offset .reg=Rn .reloc.exp=offset
5464 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5465 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5466 .shift_kind=shift .reloc.exp=shift_imm
5468 Unindexed addressing (.preind=0, .postind=0):
5470 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5474 [Rn]{!} shorthand for [Rn,#0]{!}
5475 =immediate .isreg=0 .reloc.exp=immediate
5476 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5478 It is the caller's responsibility to check for addressing modes not
5479 supported by the instruction, and to set inst.reloc.type. */
5481 static parse_operand_result
5482 parse_address_main (char **str
, int i
, int group_relocations
,
5483 group_reloc_type group_type
)
5488 if (skip_past_char (&p
, '[') == FAIL
)
5490 if (skip_past_char (&p
, '=') == FAIL
)
5492 /* Bare address - translate to PC-relative offset. */
5493 inst
.reloc
.pc_rel
= 1;
5494 inst
.operands
[i
].reg
= REG_PC
;
5495 inst
.operands
[i
].isreg
= 1;
5496 inst
.operands
[i
].preind
= 1;
5498 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_OPT_PREFIX_BIG
))
5499 return PARSE_OPERAND_FAIL
;
5501 else if (parse_big_immediate (&p
, i
, &inst
.reloc
.exp
,
5502 /*allow_symbol_p=*/TRUE
))
5503 return PARSE_OPERAND_FAIL
;
5506 return PARSE_OPERAND_SUCCESS
;
5509 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5510 skip_whitespace (p
);
5512 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5514 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5515 return PARSE_OPERAND_FAIL
;
5517 inst
.operands
[i
].reg
= reg
;
5518 inst
.operands
[i
].isreg
= 1;
5520 if (skip_past_comma (&p
) == SUCCESS
)
5522 inst
.operands
[i
].preind
= 1;
5525 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5527 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5529 inst
.operands
[i
].imm
= reg
;
5530 inst
.operands
[i
].immisreg
= 1;
5532 if (skip_past_comma (&p
) == SUCCESS
)
5533 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5534 return PARSE_OPERAND_FAIL
;
5536 else if (skip_past_char (&p
, ':') == SUCCESS
)
5538 /* FIXME: '@' should be used here, but it's filtered out by generic
5539 code before we get to see it here. This may be subject to
5541 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5543 if (result
!= PARSE_OPERAND_SUCCESS
)
5548 if (inst
.operands
[i
].negative
)
5550 inst
.operands
[i
].negative
= 0;
5554 if (group_relocations
5555 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5557 struct group_reloc_table_entry
*entry
;
5559 /* Skip over the #: or : sequence. */
5565 /* Try to parse a group relocation. Anything else is an
5567 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5569 inst
.error
= _("unknown group relocation");
5570 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5573 /* We now have the group relocation table entry corresponding to
5574 the name in the assembler source. Next, we parse the
5576 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5577 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5579 /* Record the relocation type. */
5583 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5587 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5591 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5598 if (inst
.reloc
.type
== 0)
5600 inst
.error
= _("this group relocation is not allowed on this instruction");
5601 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5607 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5608 return PARSE_OPERAND_FAIL
;
5609 /* If the offset is 0, find out if it's a +0 or -0. */
5610 if (inst
.reloc
.exp
.X_op
== O_constant
5611 && inst
.reloc
.exp
.X_add_number
== 0)
5613 skip_whitespace (q
);
5617 skip_whitespace (q
);
5620 inst
.operands
[i
].negative
= 1;
5625 else if (skip_past_char (&p
, ':') == SUCCESS
)
5627 /* FIXME: '@' should be used here, but it's filtered out by generic code
5628 before we get to see it here. This may be subject to change. */
5629 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5631 if (result
!= PARSE_OPERAND_SUCCESS
)
5635 if (skip_past_char (&p
, ']') == FAIL
)
5637 inst
.error
= _("']' expected");
5638 return PARSE_OPERAND_FAIL
;
5641 if (skip_past_char (&p
, '!') == SUCCESS
)
5642 inst
.operands
[i
].writeback
= 1;
5644 else if (skip_past_comma (&p
) == SUCCESS
)
5646 if (skip_past_char (&p
, '{') == SUCCESS
)
5648 /* [Rn], {expr} - unindexed, with option */
5649 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5650 0, 255, TRUE
) == FAIL
)
5651 return PARSE_OPERAND_FAIL
;
5653 if (skip_past_char (&p
, '}') == FAIL
)
5655 inst
.error
= _("'}' expected at end of 'option' field");
5656 return PARSE_OPERAND_FAIL
;
5658 if (inst
.operands
[i
].preind
)
5660 inst
.error
= _("cannot combine index with option");
5661 return PARSE_OPERAND_FAIL
;
5664 return PARSE_OPERAND_SUCCESS
;
5668 inst
.operands
[i
].postind
= 1;
5669 inst
.operands
[i
].writeback
= 1;
5671 if (inst
.operands
[i
].preind
)
5673 inst
.error
= _("cannot combine pre- and post-indexing");
5674 return PARSE_OPERAND_FAIL
;
5678 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5680 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5682 /* We might be using the immediate for alignment already. If we
5683 are, OR the register number into the low-order bits. */
5684 if (inst
.operands
[i
].immisalign
)
5685 inst
.operands
[i
].imm
|= reg
;
5687 inst
.operands
[i
].imm
= reg
;
5688 inst
.operands
[i
].immisreg
= 1;
5690 if (skip_past_comma (&p
) == SUCCESS
)
5691 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5692 return PARSE_OPERAND_FAIL
;
5697 if (inst
.operands
[i
].negative
)
5699 inst
.operands
[i
].negative
= 0;
5702 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5703 return PARSE_OPERAND_FAIL
;
5704 /* If the offset is 0, find out if it's a +0 or -0. */
5705 if (inst
.reloc
.exp
.X_op
== O_constant
5706 && inst
.reloc
.exp
.X_add_number
== 0)
5708 skip_whitespace (q
);
5712 skip_whitespace (q
);
5715 inst
.operands
[i
].negative
= 1;
5721 /* If at this point neither .preind nor .postind is set, we have a
5722 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5723 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5725 inst
.operands
[i
].preind
= 1;
5726 inst
.reloc
.exp
.X_op
= O_constant
;
5727 inst
.reloc
.exp
.X_add_number
= 0;
5730 return PARSE_OPERAND_SUCCESS
;
5734 parse_address (char **str
, int i
)
5736 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5740 static parse_operand_result
5741 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5743 return parse_address_main (str
, i
, 1, type
);
5746 /* Parse an operand for a MOVW or MOVT instruction. */
5748 parse_half (char **str
)
5753 skip_past_char (&p
, '#');
5754 if (strncasecmp (p
, ":lower16:", 9) == 0)
5755 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5756 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5757 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5759 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5762 skip_whitespace (p
);
5765 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5768 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5770 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5772 inst
.error
= _("constant expression expected");
5775 if (inst
.reloc
.exp
.X_add_number
< 0
5776 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5778 inst
.error
= _("immediate value out of range");
/* NOTE(review): this block survives only as a lossy text extraction --
   physical lines are split mid-token, the original file's line numbers
   are fused into the text, and several source lines (braces, the
   check_suffix label, pointer rewinds) are missing altogether.  All
   original tokens below are preserved byte-for-byte; treat this as a
   reference transcript, not compilable C.  */
/* Purpose (from the surviving header comment): parse a PSR flag
   operand for MSR/MRS -- CPSR/SPSR/APSR with optional _<fields> or
   _<bits> suffix, plus the M-profile special registers.  */
5786 /* Miscellaneous. */
5788 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5789 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5791 parse_psr (char **str
, bfd_boolean lhs
)
5794 unsigned long psr_field
;
5795 const struct asm_psr
*psr
;
5797 bfd_boolean is_apsr
= FALSE
;
5798 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5800 /* PR gas/12698: If the user has specified -march=all then m_profile will
5801 be TRUE, but we want to ignore it in this case as we are building for any
5802 CPU type, including non-m variants. */
5803 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5806 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5807 feature for ease of use and backwards compatibility. */
5809 if (strncasecmp (p
, "SPSR", 4) == 0)
5812 goto unsupported_psr
;
5814 psr_field
= SPSR_BIT
;
5816 else if (strncasecmp (p
, "CPSR", 4) == 0)
5819 goto unsupported_psr
;
5823 else if (strncasecmp (p
, "APSR", 4) == 0)
5825 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5826 and ARMv7-R architecture CPUs. */
/* NOTE(review): the M-profile branch below scans an alphanumeric name,
   normalises *apsr/xpsr/psr spellings, and looks it up in the v7m PSR
   hash table; the lines that advance `start`/`p` are missing here.  */
5835 while (ISALNUM (*p
) || *p
== '_');
5837 if (strncasecmp (start
, "iapsr", 5) == 0
5838 || strncasecmp (start
, "eapsr", 5) == 0
5839 || strncasecmp (start
, "xpsr", 4) == 0
5840 || strncasecmp (start
, "psr", 3) == 0)
5841 p
= start
+ strcspn (start
, "rR") + 1;
5843 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5849 /* If APSR is being written, a bitfield may be specified. Note that
5850 APSR itself is handled above. */
5851 if (psr
->field
<= 3)
5853 psr_field
= psr
->field
;
5859 /* M-profile MSR instructions have the mask field set to "10", except
5860 *PSR variants which modify APSR, which may use a different mask (and
5861 have been handled already). Do that by setting the PSR_f field
5863 return psr
->field
| (lhs
? PSR_f
: 0);
5866 goto unsupported_psr
;
5872 /* A suffix follows. */
5878 while (ISALNUM (*p
) || *p
== '_');
5882 /* APSR uses a notation for bits, rather than fields. */
5883 unsigned int nzcvq_bits
= 0;
5884 unsigned int g_bit
= 0;
/* NOTE(review): per-bit parsing of an APSR_<bits> suffix.  A repeated
   bit ORs in 0x20 as a poison marker -- see the 0x1f/0x20 validity
   checks further down.  The `case 'n'/'z'/...` labels were lost in
   extraction.  */
5887 for (bit
= start
; bit
!= p
; bit
++)
5889 switch (TOLOWER (*bit
))
5892 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5896 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5900 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5904 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
5908 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
5912 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
5916 inst
.error
= _("unexpected bit specified after APSR");
5921 if (nzcvq_bits
== 0x1f)
5926 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
5928 inst
.error
= _("selected processor does not "
5929 "support DSP extension");
5936 if ((nzcvq_bits
& 0x20) != 0
5937 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
5938 || (g_bit
& 0x2) != 0)
5940 inst
.error
= _("bad bitmask specified after APSR");
5946 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5951 psr_field
|= psr
->field
;
5957 goto error
; /* Garbage after "[CS]PSR". */
5959 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5960 is deprecated, but allow it anyway. */
5964 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5967 else if (!m_profile
)
5968 /* These bits are never right for M-profile devices: don't set them
5969 (only code paths which read/write APSR reach here). */
5970 psr_field
|= (PSR_c
| PSR_f
);
5976 inst
.error
= _("selected processor does not support requested special "
5977 "purpose register");
5981 inst
.error
= _("flag for {c}psr instruction expected");
5985 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5986 value suitable for splatting into the AIF field of the instruction. */
5989 parse_cps_flags (char **str
)
5998 case '\0': case ',':
6001 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6002 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6003 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6006 inst
.error
= _("unrecognized CPS flag");
6011 if (saw_a_flag
== 0)
6013 inst
.error
= _("missing CPS flags");
/* NOTE(review): lossy extraction -- the lines below are split
   mid-token and the two branch bodies (the little_endian assignments)
   are missing entirely, so the exact sense of the two strncasecmp
   tests cannot be confirmed from this text alone.  Per the surviving
   header comment the function must map "BE" -> 0 and "LE" -> 1, which
   the bare (un-negated) strncasecmp conditions as shown would not do;
   verify against the upstream file before editing.  Tokens preserved
   byte-for-byte.  */
6021 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6022 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6025 parse_endian_specifier (char **str
)
6030 if (strncasecmp (s
, "BE", 2))
6032 else if (strncasecmp (s
, "LE", 2))
6036 inst
.error
= _("valid endian specifiers are be or le");
/* The specifier must not be followed by more identifier characters.  */
6040 if (ISALNUM (s
[2]) || s
[2] == '_')
6042 inst
.error
= _("valid endian specifiers are be or le");
6047 return little_endian
;
6050 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6051 value suitable for poking into the rotate field of an sxt or sxta
6052 instruction, or FAIL on error. */
6055 parse_ror (char **str
)
6060 if (strncasecmp (s
, "ROR", 3) == 0)
6064 inst
.error
= _("missing rotation field after comma");
6068 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6073 case 0: *str
= s
; return 0x0;
6074 case 8: *str
= s
; return 0x1;
6075 case 16: *str
= s
; return 0x2;
6076 case 24: *str
= s
; return 0x3;
6079 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6084 /* Parse a conditional code (from conds[] below). The value returned is in the
6085 range 0 .. 14, or FAIL. */
6087 parse_cond (char **str
)
6090 const struct asm_cond
*c
;
6092 /* Condition codes are always 2 characters, so matching up to
6093 3 characters is sufficient. */
6098 while (ISALPHA (*q
) && n
< 3)
6100 cond
[n
] = TOLOWER (*q
);
6105 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6108 inst
.error
= _("condition required");
6116 /* Record a use of the given feature. */
6118 record_feature_use (const arm_feature_set
*feature
)
6121 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6123 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6126 /* If the given feature available in the selected CPU, mark it as used.
6127 Returns TRUE iff feature is available. */
6129 mark_feature_used (const arm_feature_set
*feature
)
6131 /* Ensure the option is valid on the current architecture. */
6132 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6135 /* Add the appropriate architecture feature for the barrier option used.
6137 record_feature_use (feature
);
6142 /* Parse an option for a barrier instruction. Returns the encoding for the
6145 parse_barrier (char **str
)
6148 const struct asm_barrier_opt
*o
;
6151 while (ISALPHA (*q
))
6154 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6159 if (!mark_feature_used (&o
->arch
))
6166 /* Parse the operands of a table branch instruction. Similar to a memory
6169 parse_tb (char **str
)
6174 if (skip_past_char (&p
, '[') == FAIL
)
6176 inst
.error
= _("'[' expected");
6180 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6182 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6185 inst
.operands
[0].reg
= reg
;
6187 if (skip_past_comma (&p
) == FAIL
)
6189 inst
.error
= _("',' expected");
6193 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6195 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6198 inst
.operands
[0].imm
= reg
;
6200 if (skip_past_comma (&p
) == SUCCESS
)
6202 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6204 if (inst
.reloc
.exp
.X_add_number
!= 1)
6206 inst
.error
= _("invalid shift");
6209 inst
.operands
[0].shifted
= 1;
6212 if (skip_past_char (&p
, ']') == FAIL
)
6214 inst
.error
= _("']' expected");
6221 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6222 information on the types the operands can take and how they are encoded.
6223 Up to four operands may be read; this function handles setting the
6224 ".present" field for each read operand itself.
6225 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6226 else returns FAIL. */
6229 parse_neon_mov (char **str
, int *which_operand
)
6231 int i
= *which_operand
, val
;
6232 enum arm_reg_type rtype
;
6234 struct neon_type_el optype
;
6236 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6238 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6239 inst
.operands
[i
].reg
= val
;
6240 inst
.operands
[i
].isscalar
= 1;
6241 inst
.operands
[i
].vectype
= optype
;
6242 inst
.operands
[i
++].present
= 1;
6244 if (skip_past_comma (&ptr
) == FAIL
)
6247 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6250 inst
.operands
[i
].reg
= val
;
6251 inst
.operands
[i
].isreg
= 1;
6252 inst
.operands
[i
].present
= 1;
6254 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6257 /* Cases 0, 1, 2, 3, 5 (D only). */
6258 if (skip_past_comma (&ptr
) == FAIL
)
6261 inst
.operands
[i
].reg
= val
;
6262 inst
.operands
[i
].isreg
= 1;
6263 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6264 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6265 inst
.operands
[i
].isvec
= 1;
6266 inst
.operands
[i
].vectype
= optype
;
6267 inst
.operands
[i
++].present
= 1;
6269 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6271 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6272 Case 13: VMOV <Sd>, <Rm> */
6273 inst
.operands
[i
].reg
= val
;
6274 inst
.operands
[i
].isreg
= 1;
6275 inst
.operands
[i
].present
= 1;
6277 if (rtype
== REG_TYPE_NQ
)
6279 first_error (_("can't use Neon quad register here"));
6282 else if (rtype
!= REG_TYPE_VFS
)
6285 if (skip_past_comma (&ptr
) == FAIL
)
6287 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6289 inst
.operands
[i
].reg
= val
;
6290 inst
.operands
[i
].isreg
= 1;
6291 inst
.operands
[i
].present
= 1;
6294 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6297 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6298 Case 1: VMOV<c><q> <Dd>, <Dm>
6299 Case 8: VMOV.F32 <Sd>, <Sm>
6300 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6302 inst
.operands
[i
].reg
= val
;
6303 inst
.operands
[i
].isreg
= 1;
6304 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6305 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6306 inst
.operands
[i
].isvec
= 1;
6307 inst
.operands
[i
].vectype
= optype
;
6308 inst
.operands
[i
].present
= 1;
6310 if (skip_past_comma (&ptr
) == SUCCESS
)
6315 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6318 inst
.operands
[i
].reg
= val
;
6319 inst
.operands
[i
].isreg
= 1;
6320 inst
.operands
[i
++].present
= 1;
6322 if (skip_past_comma (&ptr
) == FAIL
)
6325 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6328 inst
.operands
[i
].reg
= val
;
6329 inst
.operands
[i
].isreg
= 1;
6330 inst
.operands
[i
].present
= 1;
6333 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6334 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6335 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6336 Case 10: VMOV.F32 <Sd>, #<imm>
6337 Case 11: VMOV.F64 <Dd>, #<imm> */
6338 inst
.operands
[i
].immisfloat
= 1;
6339 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6341 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6342 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6346 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6350 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6353 inst
.operands
[i
].reg
= val
;
6354 inst
.operands
[i
].isreg
= 1;
6355 inst
.operands
[i
++].present
= 1;
6357 if (skip_past_comma (&ptr
) == FAIL
)
6360 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6362 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6363 inst
.operands
[i
].reg
= val
;
6364 inst
.operands
[i
].isscalar
= 1;
6365 inst
.operands
[i
].present
= 1;
6366 inst
.operands
[i
].vectype
= optype
;
6368 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6370 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6371 inst
.operands
[i
].reg
= val
;
6372 inst
.operands
[i
].isreg
= 1;
6373 inst
.operands
[i
++].present
= 1;
6375 if (skip_past_comma (&ptr
) == FAIL
)
6378 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6381 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6385 inst
.operands
[i
].reg
= val
;
6386 inst
.operands
[i
].isreg
= 1;
6387 inst
.operands
[i
].isvec
= 1;
6388 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6389 inst
.operands
[i
].vectype
= optype
;
6390 inst
.operands
[i
].present
= 1;
6392 if (rtype
== REG_TYPE_VFS
)
6396 if (skip_past_comma (&ptr
) == FAIL
)
6398 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6401 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6404 inst
.operands
[i
].reg
= val
;
6405 inst
.operands
[i
].isreg
= 1;
6406 inst
.operands
[i
].isvec
= 1;
6407 inst
.operands
[i
].issingle
= 1;
6408 inst
.operands
[i
].vectype
= optype
;
6409 inst
.operands
[i
].present
= 1;
6412 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6416 inst
.operands
[i
].reg
= val
;
6417 inst
.operands
[i
].isreg
= 1;
6418 inst
.operands
[i
].isvec
= 1;
6419 inst
.operands
[i
].issingle
= 1;
6420 inst
.operands
[i
].vectype
= optype
;
6421 inst
.operands
[i
].present
= 1;
6426 first_error (_("parse error"));
6430 /* Successfully parsed the operands. Update args. */
6436 first_error (_("expected comma"));
6440 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code goes in the low
   half-word, the Thumb code in the high half-word.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  The order is semantic:
   everything at or after OP_FIRST_OPTIONAL is an optional operand.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*			       0 .. 31 */
  OP_oI32b,	/*			       1 .. 32 */
  OP_oI32z,	/*			       0 .. 32 */
  OP_oIffffb,	/*			       0 .. 65535 */
  OP_oI255c,	/*	 curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRnpcsp,	/* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
/* NOTE(review): this block survives only as a lossy text extraction --
   physical lines are split mid-token, the original file's line numbers
   are fused into the text, and many source lines (macro bodies, case
   labels, braces) are missing altogether.  All original tokens below
   are preserved byte-for-byte; treat this as a reference transcript,
   not compilable C.  */
6580 /* Generic instruction operand parser. This does no encoding and no
6581 semantic validation; it merely squirrels values away in the inst
6582 structure. Returns SUCCESS or FAIL depending on whether the
6583 specified grammar matched. */
6585 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6587 unsigned const int *upat
= pattern
;
6588 char *backtrack_pos
= 0;
6589 const char *backtrack_error
= 0;
6590 int i
, val
= 0, backtrack_index
= 0;
6591 enum arm_reg_type rtype
;
6592 parse_operand_result result
;
6593 unsigned int op_parse_code
;
/* Helper macros used only inside this function; #undef'd after the
   closing brace.  Each consumes one syntactic element from `str` and
   jumps to `failure` on error (bodies partially lost in extraction).  */
6595 #define po_char_or_fail(chr) \
6598 if (skip_past_char (&str, chr) == FAIL) \
6603 #define po_reg_or_fail(regtype) \
6606 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6607 & inst.operands[i].vectype); \
6610 first_error (_(reg_expected_msgs[regtype])); \
6613 inst.operands[i].reg = val; \
6614 inst.operands[i].isreg = 1; \
6615 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6616 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6617 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6618 || rtype == REG_TYPE_VFD \
6619 || rtype == REG_TYPE_NQ); \
6623 #define po_reg_or_goto(regtype, label) \
6626 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6627 & inst.operands[i].vectype); \
6631 inst.operands[i].reg = val; \
6632 inst.operands[i].isreg = 1; \
6633 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6634 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6635 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6636 || rtype == REG_TYPE_VFD \
6637 || rtype == REG_TYPE_NQ); \
6641 #define po_imm_or_fail(min, max, popt) \
6644 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6646 inst.operands[i].imm = val; \
6650 #define po_scalar_or_goto(elsz, label) \
6653 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6656 inst.operands[i].reg = val; \
6657 inst.operands[i].isscalar = 1; \
6661 #define po_misc_or_fail(expr) \
6669 #define po_misc_or_fail_no_backtrack(expr) \
6673 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6674 backtrack_pos = 0; \
6675 if (result != PARSE_OPERAND_SUCCESS) \
6680 #define po_barrier_or_imm(str) \
6683 val = parse_barrier (&str); \
6684 if (val == FAIL && ! ISALPHA (*str)) \
6687 /* ISB can only take SY as an option. */ \
6688 || ((inst.instruction & 0xf0) == 0x60 \
6691 inst.error = _("invalid barrier type"); \
6692 backtrack_pos = 0; \
6698 skip_whitespace (str
);
/* Main loop: one iteration per operand-pattern code until OP_stop.  */
6700 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6702 op_parse_code
= upat
[i
];
6703 if (op_parse_code
>= 1<<16)
6704 op_parse_code
= thumb
? (op_parse_code
>> 16)
6705 : (op_parse_code
& ((1<<16)-1));
6707 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6709 /* Remember where we are in case we need to backtrack. */
6710 gas_assert (!backtrack_pos
);
6711 backtrack_pos
= str
;
6712 backtrack_error
= inst
.error
;
6713 backtrack_index
= i
;
6716 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6717 po_char_or_fail (',');
/* Dispatch on the operand code (many case labels lost in
   extraction).  */
6719 switch (op_parse_code
)
6727 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6728 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6729 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6730 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6731 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6732 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6734 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6736 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6738 /* Also accept generic coprocessor regs for unknown registers. */
6740 po_reg_or_fail (REG_TYPE_CN
);
6742 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6743 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6744 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6745 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6746 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6747 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6748 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6749 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6750 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6751 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6753 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6755 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6756 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6758 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6760 /* Neon scalar. Using an element size of 8 means that some invalid
6761 scalars are accepted here, so deal with those in later code. */
6762 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6766 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6769 po_imm_or_fail (0, 0, TRUE
);
6774 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6779 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6782 if (parse_ifimm_zero (&str
))
6783 inst
.operands
[i
].imm
= 0;
6787 = _("only floating point zero is allowed as immediate value");
6795 po_scalar_or_goto (8, try_rr
);
6798 po_reg_or_fail (REG_TYPE_RN
);
6804 po_scalar_or_goto (8, try_nsdq
);
6807 po_reg_or_fail (REG_TYPE_NSDQ
);
6813 po_scalar_or_goto (8, try_ndq
);
6816 po_reg_or_fail (REG_TYPE_NDQ
);
6822 po_scalar_or_goto (8, try_vfd
);
6825 po_reg_or_fail (REG_TYPE_VFD
);
6830 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6831 not careful then bad things might happen. */
6832 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6837 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6840 /* There's a possibility of getting a 64-bit immediate here, so
6841 we need special handling. */
6842 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6845 inst
.error
= _("immediate value is out of range");
6853 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6856 po_imm_or_fail (0, 63, TRUE
);
6861 po_char_or_fail ('[');
6862 po_reg_or_fail (REG_TYPE_RN
);
6863 po_char_or_fail (']');
6869 po_reg_or_fail (REG_TYPE_RN
);
6870 if (skip_past_char (&str
, '!') == SUCCESS
)
6871 inst
.operands
[i
].writeback
= 1;
6875 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6876 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6877 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6878 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6879 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6880 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6881 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6882 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6883 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6884 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6885 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6886 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6888 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6890 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6891 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6893 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6894 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6895 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
6896 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6898 /* Immediate variants */
6900 po_char_or_fail ('{');
6901 po_imm_or_fail (0, 255, TRUE
);
6902 po_char_or_fail ('}');
6906 /* The expression parser chokes on a trailing !, so we have
6907 to find it first and zap it. */
6910 while (*s
&& *s
!= ',')
6915 inst
.operands
[i
].writeback
= 1;
6917 po_imm_or_fail (0, 31, TRUE
);
6925 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6930 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6935 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6937 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6939 val
= parse_reloc (&str
);
6942 inst
.error
= _("unrecognized relocation suffix");
6945 else if (val
!= BFD_RELOC_UNUSED
)
6947 inst
.operands
[i
].imm
= val
;
6948 inst
.operands
[i
].hasreloc
= 1;
6953 /* Operand for MOVW or MOVT. */
6955 po_misc_or_fail (parse_half (&str
));
6958 /* Register or expression. */
6959 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6960 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6962 /* Register or immediate. */
6963 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6964 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6966 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6968 if (!is_immediate_prefix (*str
))
6971 val
= parse_fpa_immediate (&str
);
6974 /* FPA immediates are encoded as registers 8-15.
6975 parse_fpa_immediate has already applied the offset. */
6976 inst
.operands
[i
].reg
= val
;
6977 inst
.operands
[i
].isreg
= 1;
6980 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6981 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6983 /* Two kinds of register. */
6986 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6988 || (rege
->type
!= REG_TYPE_MMXWR
6989 && rege
->type
!= REG_TYPE_MMXWC
6990 && rege
->type
!= REG_TYPE_MMXWCG
))
6992 inst
.error
= _("iWMMXt data or control register expected");
6995 inst
.operands
[i
].reg
= rege
->number
;
6996 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7002 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7004 || (rege
->type
!= REG_TYPE_MMXWC
7005 && rege
->type
!= REG_TYPE_MMXWCG
))
7007 inst
.error
= _("iWMMXt control register expected");
7010 inst
.operands
[i
].reg
= rege
->number
;
7011 inst
.operands
[i
].isreg
= 1;
7016 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7017 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7018 case OP_oROR
: val
= parse_ror (&str
); break;
7019 case OP_COND
: val
= parse_cond (&str
); break;
7020 case OP_oBARRIER_I15
:
7021 po_barrier_or_imm (str
); break;
7023 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7029 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7030 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7032 inst
.error
= _("Banked registers are not available with this "
7038 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7042 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7045 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7047 if (strncasecmp (str
, "APSR_", 5) == 0)
7054 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7055 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7056 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7057 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7058 default: found
= 16;
7062 inst
.operands
[i
].isvec
= 1;
7063 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7064 inst
.operands
[i
].reg
= REG_PC
;
7071 po_misc_or_fail (parse_tb (&str
));
7074 /* Register lists. */
7076 val
= parse_reg_list (&str
);
7079 inst
.operands
[i
].writeback
= 1;
7085 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7089 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7093 /* Allow Q registers too. */
7094 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7099 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7101 inst
.operands
[i
].issingle
= 1;
7106 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7111 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7112 &inst
.operands
[i
].vectype
);
7115 /* Addressing modes */
7117 po_misc_or_fail (parse_address (&str
, i
));
7121 po_misc_or_fail_no_backtrack (
7122 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7126 po_misc_or_fail_no_backtrack (
7127 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7131 po_misc_or_fail_no_backtrack (
7132 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7136 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7140 po_misc_or_fail_no_backtrack (
7141 parse_shifter_operand_group_reloc (&str
, i
));
7145 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7149 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7153 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7157 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7160 /* Various value-based sanity checks and shared operations. We
7161 do not signal immediate failures for the register constraints;
7162 this allows a syntax error to take precedence. */
7163 switch (op_parse_code
)
7171 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7172 inst
.error
= BAD_PC
;
7177 if (inst
.operands
[i
].isreg
)
7179 if (inst
.operands
[i
].reg
== REG_PC
)
7180 inst
.error
= BAD_PC
;
7181 else if (inst
.operands
[i
].reg
== REG_SP
)
7182 inst
.error
= BAD_SP
;
7187 if (inst
.operands
[i
].isreg
7188 && inst
.operands
[i
].reg
== REG_PC
7189 && (inst
.operands
[i
].writeback
|| thumb
))
7190 inst
.error
= BAD_PC
;
7199 case OP_oBARRIER_I15
:
7208 inst
.operands
[i
].imm
= val
;
7215 /* If we get here, this operand was successfully parsed. */
7216 inst
.operands
[i
].present
= 1;
7220 inst
.error
= BAD_ARGS
;
7225 /* The parse routine should already have set inst.error, but set a
7226 default here just in case. */
7228 inst
.error
= _("syntax error");
7232 /* Do not backtrack over a trailing optional argument that
7233 absorbed some text. We will only fail again, with the
7234 'garbage following instruction' error message, which is
7235 probably less helpful than the current one. */
7236 if (backtrack_index
== i
&& backtrack_pos
!= str
7237 && upat
[i
+1] == OP_stop
)
7240 inst
.error
= _("syntax error");
7244 /* Try again, skipping the optional argument at backtrack_pos. */
7245 str
= backtrack_pos
;
7246 inst
.error
= backtrack_error
;
7247 inst
.operands
[backtrack_index
].present
= 0;
7248 i
= backtrack_index
;
7252 /* Check that we have parsed all the arguments. */
7253 if (*str
!= '\0' && !inst
.error
)
7254 inst
.error
= _("garbage following instruction");
7256 return inst
.error
? FAIL
: SUCCESS
;
7259 #undef po_char_or_fail
7260 #undef po_reg_or_fail
7261 #undef po_reg_or_goto
7262 #undef po_imm_or_fail
7263 #undef po_scalar_or_fail
7264 #undef po_barrier_or_imm
7266 /* Shorthand macro for instruction encoding functions issuing errors. */
7267 #define constraint(expr, err) \
7278 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7279 instructions are unpredictable if these registers are used. This
7280 is the BadReg predicate in ARM's Thumb-2 documentation. */
7281 #define reject_bad_reg(reg) \
7283 if (reg == REG_SP || reg == REG_PC) \
7285 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
7290 /* If REG is R13 (the stack pointer), warn that its use is
7292 #define warn_deprecated_sp(reg) \
7294 if (warn_on_deprecated && reg == REG_SP) \
7295 as_tsktsk (_("use of r13 is deprecated")); \
7298 /* Functions for operand encoding. ARM, then Thumb. */
/* Rotate the 32-bit value V left by N bits (N taken modulo 32).  Both
   operands are fully parenthesized so the macro expands safely when V
   or N is a compound expression (e.g. `x | y', whose `|' binds more
   loosely than the shifts in the expansion).  */
#define rotate_left(v, n) (((v) << ((n) & 31)) | ((v) >> ((32 - (n)) & 31)))
7302 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7304 The only binary encoding difference is the Coprocessor number. Coprocessor
7305 9 is used for half-precision calculations or conversions. The format of the
7306 instruction is the same as the equivalent Coprocessor 10 instuction that
7307 exists for Single-Precision operation. */
7310 do_scalar_fp16_v82_encode (void)
7312 if (inst
.cond
!= COND_ALWAYS
)
7313 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7314 " the behaviour is UNPREDICTABLE"));
7315 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7318 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7319 mark_feature_used (&arm_ext_fp16
);
7322 /* If VAL can be encoded in the immediate field of an ARM instruction,
7323 return the encoded form. Otherwise, return FAIL. */
7326 encode_arm_immediate (unsigned int val
)
7333 for (i
= 2; i
< 32; i
+= 2)
7334 if ((a
= rotate_left (val
, i
)) <= 0xff)
7335 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7340 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7341 return the encoded form. Otherwise, return FAIL. */
7343 encode_thumb32_immediate (unsigned int val
)
7350 for (i
= 1; i
<= 24; i
++)
7353 if ((val
& ~(0xff << i
)) == 0)
7354 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7358 if (val
== ((a
<< 16) | a
))
7360 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7364 if (val
== ((a
<< 16) | a
))
7365 return 0x200 | (a
>> 8);
7369 /* Encode a VFP SP or DP register number into inst.instruction. */
7372 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7374 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7377 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7380 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7383 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7388 first_error (_("D register out of range for selected VFP version"));
7396 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7400 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7404 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7408 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7412 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7416 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7424 /* Encode a <shift> in an ARM-format instruction. The immediate,
7425 if any, is handled by md_apply_fix. */
7427 encode_arm_shift (int i
)
7429 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7430 inst
.instruction
|= SHIFT_ROR
<< 5;
7433 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7434 if (inst
.operands
[i
].immisreg
)
7436 inst
.instruction
|= SHIFT_BY_REG
;
7437 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7440 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7445 encode_arm_shifter_operand (int i
)
7447 if (inst
.operands
[i
].isreg
)
7449 inst
.instruction
|= inst
.operands
[i
].reg
;
7450 encode_arm_shift (i
);
7454 inst
.instruction
|= INST_IMMEDIATE
;
7455 if (inst
.reloc
.type
!= BFD_RELOC_ARM_IMMEDIATE
)
7456 inst
.instruction
|= inst
.operands
[i
].imm
;
7460 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7462 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7465 Generate an error if the operand is not a register. */
7466 constraint (!inst
.operands
[i
].isreg
,
7467 _("Instruction does not support =N addresses"));
7469 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7471 if (inst
.operands
[i
].preind
)
7475 inst
.error
= _("instruction does not accept preindexed addressing");
7478 inst
.instruction
|= PRE_INDEX
;
7479 if (inst
.operands
[i
].writeback
)
7480 inst
.instruction
|= WRITE_BACK
;
7483 else if (inst
.operands
[i
].postind
)
7485 gas_assert (inst
.operands
[i
].writeback
);
7487 inst
.instruction
|= WRITE_BACK
;
7489 else /* unindexed - only for coprocessor */
7491 inst
.error
= _("instruction does not accept unindexed addressing");
7495 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7496 && (((inst
.instruction
& 0x000f0000) >> 16)
7497 == ((inst
.instruction
& 0x0000f000) >> 12)))
7498 as_warn ((inst
.instruction
& LOAD_BIT
)
7499 ? _("destination register same as write-back base")
7500 : _("source register same as write-back base"));
7503 /* inst.operands[i] was set up by parse_address. Encode it into an
7504 ARM-format mode 2 load or store instruction. If is_t is true,
7505 reject forms that cannot be used with a T instruction (i.e. not
7508 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7510 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7512 encode_arm_addr_mode_common (i
, is_t
);
7514 if (inst
.operands
[i
].immisreg
)
7516 constraint ((inst
.operands
[i
].imm
== REG_PC
7517 || (is_pc
&& inst
.operands
[i
].writeback
)),
7519 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7520 inst
.instruction
|= inst
.operands
[i
].imm
;
7521 if (!inst
.operands
[i
].negative
)
7522 inst
.instruction
|= INDEX_UP
;
7523 if (inst
.operands
[i
].shifted
)
7525 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7526 inst
.instruction
|= SHIFT_ROR
<< 5;
7529 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7530 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7534 else /* immediate offset in inst.reloc */
7536 if (is_pc
&& !inst
.reloc
.pc_rel
)
7538 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7540 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7541 cannot use PC in addressing.
7542 PC cannot be used in writeback addressing, either. */
7543 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7546 /* Use of PC in str is deprecated for ARMv7. */
7547 if (warn_on_deprecated
7549 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7550 as_tsktsk (_("use of PC in this instruction is deprecated"));
7553 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7555 /* Prefer + for zero encoded value. */
7556 if (!inst
.operands
[i
].negative
)
7557 inst
.instruction
|= INDEX_UP
;
7558 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
7563 /* inst.operands[i] was set up by parse_address. Encode it into an
7564 ARM-format mode 3 load or store instruction. Reject forms that
7565 cannot be used with such instructions. If is_t is true, reject
7566 forms that cannot be used with a T instruction (i.e. not
7569 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7571 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7573 inst
.error
= _("instruction does not accept scaled register index");
7577 encode_arm_addr_mode_common (i
, is_t
);
7579 if (inst
.operands
[i
].immisreg
)
7581 constraint ((inst
.operands
[i
].imm
== REG_PC
7582 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7584 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7586 inst
.instruction
|= inst
.operands
[i
].imm
;
7587 if (!inst
.operands
[i
].negative
)
7588 inst
.instruction
|= INDEX_UP
;
7590 else /* immediate offset in inst.reloc */
7592 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
7593 && inst
.operands
[i
].writeback
),
7595 inst
.instruction
|= HWOFFSET_IMM
;
7596 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7598 /* Prefer + for zero encoded value. */
7599 if (!inst
.operands
[i
].negative
)
7600 inst
.instruction
|= INDEX_UP
;
7602 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7607 /* Write immediate bits [7:0] to the following locations:
7609 |28/24|23 19|18 16|15 4|3 0|
7610 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7612 This function is used by VMOV/VMVN/VORR/VBIC. */
7615 neon_write_immbits (unsigned immbits
)
7617 inst
.instruction
|= immbits
& 0xf;
7618 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7619 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
7622 /* Invert low-order SIZE bits of XHI:XLO. */
7625 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
7627 unsigned immlo
= xlo
? *xlo
: 0;
7628 unsigned immhi
= xhi
? *xhi
: 0;
7633 immlo
= (~immlo
) & 0xff;
7637 immlo
= (~immlo
) & 0xffff;
7641 immhi
= (~immhi
) & 0xffffffff;
7645 immlo
= (~immlo
) & 0xffffffff;
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD, i.e. every
   byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  unsigned byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (8 * byte)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }
  return 1;
}
/* For immediate of above form, return 0bABCD: squash bit 0 of each
   byte of IMM into one bit of the result.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  unsigned byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (8 * byte)) & 1u) << byte;

  return result;
}
/* Compress quarter-float representation to 0b...000 abcdefgh:
   bit 31 of IMM (the sign) becomes bit 7 of the result, and
   bits 25:19 become bits 6:0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned sign_bit = (imm >> 24) & 0x80;
  unsigned exp_and_mantissa = (imm >> 19) & 0x7f;

  return sign_bit | exp_and_mantissa;
}
7688 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7689 the instruction. *OP is passed as the initial value of the op field, and
7690 may be set to a different value depending on the constant (i.e.
7691 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7692 MVN). If the immediate looks like a repeated pattern then also
7693 try smaller element sizes. */
7696 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7697 unsigned *immbits
, int *op
, int size
,
7698 enum neon_el_type type
)
7700 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7702 if (type
== NT_float
&& !float_p
)
7705 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7707 if (size
!= 32 || *op
== 1)
7709 *immbits
= neon_qfloat_bits (immlo
);
7715 if (neon_bits_same_in_bytes (immhi
)
7716 && neon_bits_same_in_bytes (immlo
))
7720 *immbits
= (neon_squash_bits (immhi
) << 4)
7721 | neon_squash_bits (immlo
);
7732 if (immlo
== (immlo
& 0x000000ff))
7737 else if (immlo
== (immlo
& 0x0000ff00))
7739 *immbits
= immlo
>> 8;
7742 else if (immlo
== (immlo
& 0x00ff0000))
7744 *immbits
= immlo
>> 16;
7747 else if (immlo
== (immlo
& 0xff000000))
7749 *immbits
= immlo
>> 24;
7752 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7754 *immbits
= (immlo
>> 8) & 0xff;
7757 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7759 *immbits
= (immlo
>> 16) & 0xff;
7763 if ((immlo
& 0xffff) != (immlo
>> 16))
7770 if (immlo
== (immlo
& 0x000000ff))
7775 else if (immlo
== (immlo
& 0x0000ff00))
7777 *immbits
= immlo
>> 8;
7781 if ((immlo
& 0xff) != (immlo
>> 8))
7786 if (immlo
== (immlo
& 0x000000ff))
7788 /* Don't allow MVN with 8-bit immediate. */
7798 #if defined BFD_HOST_64_BIT
7799 /* Returns TRUE if double precision value V may be cast
7800 to single precision without loss of accuracy. */
7803 is_double_a_single (bfd_int64_t v
)
7805 int exp
= (int)((v
>> 52) & 0x7FF);
7806 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7808 return (exp
== 0 || exp
== 0x7FF
7809 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
7810 && (mantissa
& 0x1FFFFFFFl
) == 0;
/* NOTE(review): this span was damaged during extraction -- the
   function's return type, braces, and the special-case handling that
   originally sat between the exponent re-bias and the final pack
   (overflow-to-infinity, flush of denormals, and the mantissa
   narrowing shift) are missing here.  Recover the complete body from
   upstream gas/config/tc-arm.c before attempting any code change;
   editing this fragment as-is would not produce valid C.  */
7813 /* Returns a double precision value casted to single precision
7814 (ignoring the least significant bits in exponent and mantissa). */
7817 double_to_single (bfd_int64_t v
)
7819 int sign
= (int) ((v
>> 63) & 1l);
7820 int exp
= (int) ((v
>> 52) & 0x7FF);
7821 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7827 exp
= exp
- 1023 + 127;
7836 /* No denormalized numbers. */
7842 return (sign
<< 31) | (exp
<< 23) | mantissa
;
7844 #endif /* BFD_HOST_64_BIT */
7853 static void do_vfp_nsyn_opcode (const char *);
7855 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7856 Determine whether it can be performed with a move instruction; if
7857 it can, convert inst.instruction to that move instruction and
7858 return TRUE; if it can't, convert inst.instruction to a literal-pool
7859 load and return FALSE. If this is not a valid thing to do in the
7860 current context, set inst.error and return TRUE.
7862 inst.operands[i] describes the destination register. */
7865 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
7868 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
7869 bfd_boolean arm_p
= (t
== CONST_ARM
);
7872 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
7876 if ((inst
.instruction
& tbit
) == 0)
7878 inst
.error
= _("invalid pseudo operation");
7882 if (inst
.reloc
.exp
.X_op
!= O_constant
7883 && inst
.reloc
.exp
.X_op
!= O_symbol
7884 && inst
.reloc
.exp
.X_op
!= O_big
)
7886 inst
.error
= _("constant expression expected");
7890 if (inst
.reloc
.exp
.X_op
== O_constant
7891 || inst
.reloc
.exp
.X_op
== O_big
)
7893 #if defined BFD_HOST_64_BIT
7898 if (inst
.reloc
.exp
.X_op
== O_big
)
7900 LITTLENUM_TYPE w
[X_PRECISION
];
7903 if (inst
.reloc
.exp
.X_add_number
== -1)
7905 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
7907 /* FIXME: Should we check words w[2..5] ? */
7912 #if defined BFD_HOST_64_BIT
7914 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
7915 << LITTLENUM_NUMBER_OF_BITS
)
7916 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
7917 << LITTLENUM_NUMBER_OF_BITS
)
7918 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
7919 << LITTLENUM_NUMBER_OF_BITS
)
7920 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
7922 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
7923 | (l
[0] & LITTLENUM_MASK
);
7927 v
= inst
.reloc
.exp
.X_add_number
;
7929 if (!inst
.operands
[i
].issingle
)
7933 /* This can be encoded only for a low register. */
7934 if ((v
& ~0xFF) == 0 && (inst
.operands
[i
].reg
< 8))
7936 /* This can be done with a mov(1) instruction. */
7937 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
7938 inst
.instruction
|= v
;
7942 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
7943 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7945 /* Check if on thumb2 it can be done with a mov.w, mvn or
7946 movw instruction. */
7947 unsigned int newimm
;
7948 bfd_boolean isNegated
;
7950 newimm
= encode_thumb32_immediate (v
);
7951 if (newimm
!= (unsigned int) FAIL
)
7955 newimm
= encode_thumb32_immediate (~v
);
7956 if (newimm
!= (unsigned int) FAIL
)
7960 /* The number can be loaded with a mov.w or mvn
7962 if (newimm
!= (unsigned int) FAIL
7963 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
7965 inst
.instruction
= (0xf04f0000 /* MOV.W. */
7966 | (inst
.operands
[i
].reg
<< 8));
7967 /* Change to MOVN. */
7968 inst
.instruction
|= (isNegated
? 0x200000 : 0);
7969 inst
.instruction
|= (newimm
& 0x800) << 15;
7970 inst
.instruction
|= (newimm
& 0x700) << 4;
7971 inst
.instruction
|= (newimm
& 0x0ff);
7974 /* The number can be loaded with a movw instruction. */
7975 else if ((v
& ~0xFFFF) == 0
7976 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7978 int imm
= v
& 0xFFFF;
7980 inst
.instruction
= 0xf2400000; /* MOVW. */
7981 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
7982 inst
.instruction
|= (imm
& 0xf000) << 4;
7983 inst
.instruction
|= (imm
& 0x0800) << 15;
7984 inst
.instruction
|= (imm
& 0x0700) << 4;
7985 inst
.instruction
|= (imm
& 0x00ff);
7992 int value
= encode_arm_immediate (v
);
7996 /* This can be done with a mov instruction. */
7997 inst
.instruction
&= LITERAL_MASK
;
7998 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
7999 inst
.instruction
|= value
& 0xfff;
8003 value
= encode_arm_immediate (~ v
);
8006 /* This can be done with a mvn instruction. */
8007 inst
.instruction
&= LITERAL_MASK
;
8008 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8009 inst
.instruction
|= value
& 0xfff;
8013 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8016 unsigned immbits
= 0;
8017 unsigned immlo
= inst
.operands
[1].imm
;
8018 unsigned immhi
= inst
.operands
[1].regisimm
8019 ? inst
.operands
[1].reg
8020 : inst
.reloc
.exp
.X_unsigned
8022 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8023 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8024 &op
, 64, NT_invtype
);
8028 neon_invert_size (&immlo
, &immhi
, 64);
8030 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8031 &op
, 64, NT_invtype
);
8036 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8042 /* Fill other bits in vmov encoding for both thumb and arm. */
8044 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8046 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8047 neon_write_immbits (immbits
);
8055 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8056 if (inst
.operands
[i
].issingle
8057 && is_quarter_float (inst
.operands
[1].imm
)
8058 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8060 inst
.operands
[1].imm
=
8061 neon_qfloat_bits (v
);
8062 do_vfp_nsyn_opcode ("fconsts");
8066 /* If our host does not support a 64-bit type then we cannot perform
8067 the following optimization. This mean that there will be a
8068 discrepancy between the output produced by an assembler built for
8069 a 32-bit-only host and the output produced from a 64-bit host, but
8070 this cannot be helped. */
8071 #if defined BFD_HOST_64_BIT
8072 else if (!inst
.operands
[1].issingle
8073 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8075 if (is_double_a_single (v
)
8076 && is_quarter_float (double_to_single (v
)))
8078 inst
.operands
[1].imm
=
8079 neon_qfloat_bits (double_to_single (v
));
8080 do_vfp_nsyn_opcode ("fconstd");
8088 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8089 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8092 inst
.operands
[1].reg
= REG_PC
;
8093 inst
.operands
[1].isreg
= 1;
8094 inst
.operands
[1].preind
= 1;
8095 inst
.reloc
.pc_rel
= 1;
8096 inst
.reloc
.type
= (thumb_p
8097 ? BFD_RELOC_ARM_THUMB_OFFSET
8099 ? BFD_RELOC_ARM_HWLITERAL
8100 : BFD_RELOC_ARM_LITERAL
));
8104 /* inst.operands[i] was set up by parse_address. Encode it into an
8105 ARM-format instruction. Reject all forms which cannot be encoded
8106 into a coprocessor load/store instruction. If wb_ok is false,
8107 reject use of writeback; if unind_ok is false, reject use of
8108 unindexed addressing. If reloc_override is not 0, use it instead
8109 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8110 (in which case it is preserved). */
8113 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8115 if (!inst
.operands
[i
].isreg
)
8118 if (! inst
.operands
[0].isvec
)
8120 inst
.error
= _("invalid co-processor operand");
8123 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8127 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8129 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8131 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8133 gas_assert (!inst
.operands
[i
].writeback
);
8136 inst
.error
= _("instruction does not support unindexed addressing");
8139 inst
.instruction
|= inst
.operands
[i
].imm
;
8140 inst
.instruction
|= INDEX_UP
;
8144 if (inst
.operands
[i
].preind
)
8145 inst
.instruction
|= PRE_INDEX
;
8147 if (inst
.operands
[i
].writeback
)
8149 if (inst
.operands
[i
].reg
== REG_PC
)
8151 inst
.error
= _("pc may not be used with write-back");
8156 inst
.error
= _("instruction does not support writeback");
8159 inst
.instruction
|= WRITE_BACK
;
8163 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
8164 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8165 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
8166 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8169 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8171 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8174 /* Prefer + for zero encoded value. */
8175 if (!inst
.operands
[i
].negative
)
8176 inst
.instruction
|= INDEX_UP
;
8181 /* Functions for instruction encoding, sorted by sub-architecture.
8182 First some generics; their names are taken from the conventional
8183 bit positions for register arguments in ARM format instructions. */
8193 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8199 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8205 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8206 inst
.instruction
|= inst
.operands
[1].reg
;
8212 inst
.instruction
|= inst
.operands
[0].reg
;
8213 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8219 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8220 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8226 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8227 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8233 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8234 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8238 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8240 if (ARM_CPU_IS_ANY (cpu_variant
))
8242 as_tsktsk ("%s", msg
);
8245 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8257 unsigned Rn
= inst
.operands
[2].reg
;
8258 /* Enforce restrictions on SWP instruction. */
8259 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8261 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8262 _("Rn must not overlap other operands"));
8264 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8266 if (!check_obsolete (&arm_ext_v8
,
8267 _("swp{b} use is obsoleted for ARMv8 and later"))
8268 && warn_on_deprecated
8269 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8270 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8273 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8274 inst
.instruction
|= inst
.operands
[1].reg
;
8275 inst
.instruction
|= Rn
<< 16;
8281 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8282 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8283 inst
.instruction
|= inst
.operands
[2].reg
;
8289 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8290 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
8291 && inst
.reloc
.exp
.X_op
!= O_illegal
)
8292 || inst
.reloc
.exp
.X_add_number
!= 0),
8294 inst
.instruction
|= inst
.operands
[0].reg
;
8295 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8296 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8302 inst
.instruction
|= inst
.operands
[0].imm
;
8308 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8309 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8312 /* ARM instructions, in alphabetical order by function name (except
8313 that wrapper functions appear immediately after the function they
8316 /* This is a pseudo-op of the form "adr rd, label" to be converted
8317 into a relative address of the form "add rd, pc, #label-.-8". */
8322 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8324 /* Frag hacking will turn this into a sub instruction if the offset turns
8325 out to be negative. */
8326 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8327 inst
.reloc
.pc_rel
= 1;
8328 inst
.reloc
.exp
.X_add_number
-= 8;
8331 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8332 into a relative address of the form:
8333 add rd, pc, #low(label-.-8)"
8334 add rd, rd, #high(label-.-8)" */
8339 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8341 /* Frag hacking will turn this into a sub instruction if the offset turns
8342 out to be negative. */
8343 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8344 inst
.reloc
.pc_rel
= 1;
8345 inst
.size
= INSN_SIZE
* 2;
8346 inst
.reloc
.exp
.X_add_number
-= 8;
8352 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8353 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8355 if (!inst
.operands
[1].present
)
8356 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8357 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8358 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8359 encode_arm_shifter_operand (2);
8365 if (inst
.operands
[0].present
)
8366 inst
.instruction
|= inst
.operands
[0].imm
;
8368 inst
.instruction
|= 0xf;
8374 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8375 constraint (msb
> 32, _("bit-field extends past end of register"));
8376 /* The instruction encoding stores the LSB and MSB,
8377 not the LSB and width. */
8378 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8379 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8380 inst
.instruction
|= (msb
- 1) << 16;
8388 /* #0 in second position is alternative syntax for bfc, which is
8389 the same instruction but with REG_PC in the Rm field. */
8390 if (!inst
.operands
[1].isreg
)
8391 inst
.operands
[1].reg
= REG_PC
;
8393 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8394 constraint (msb
> 32, _("bit-field extends past end of register"));
8395 /* The instruction encoding stores the LSB and MSB,
8396 not the LSB and width. */
8397 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8398 inst
.instruction
|= inst
.operands
[1].reg
;
8399 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8400 inst
.instruction
|= (msb
- 1) << 16;
8406 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8407 _("bit-field extends past end of register"));
8408 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8409 inst
.instruction
|= inst
.operands
[1].reg
;
8410 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8411 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8414 /* ARM V5 breakpoint instruction (argument parse)
8415 BKPT <16 bit unsigned immediate>
8416 Instruction is not conditional.
8417 The bit pattern given in insns[] has the COND_ALWAYS condition,
8418 and it is an error if the caller tried to override that. */
8423 /* Top 12 of 16 bits to bits 19:8. */
8424 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8426 /* Bottom 4 of 16 bits to bits 3:0. */
8427 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8431 encode_branch (int default_reloc
)
8433 if (inst
.operands
[0].hasreloc
)
8435 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8436 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8437 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8438 inst
.reloc
.type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8439 ? BFD_RELOC_ARM_PLT32
8440 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8443 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
8444 inst
.reloc
.pc_rel
= 1;
8451 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8452 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8455 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8462 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8464 if (inst
.cond
== COND_ALWAYS
)
8465 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8467 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8471 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8474 /* ARM V5 branch-link-exchange instruction (argument parse)
8475 BLX <target_addr> ie BLX(1)
8476 BLX{<condition>} <Rm> ie BLX(2)
8477 Unfortunately, there are two different opcodes for this mnemonic.
8478 So, the insns[].value is not used, and the code here zaps values
8479 into inst.instruction.
8480 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8485 if (inst
.operands
[0].isreg
)
8487 /* Arg is a register; the opcode provided by insns[] is correct.
8488 It is not illegal to do "blx pc", just useless. */
8489 if (inst
.operands
[0].reg
== REG_PC
)
8490 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8492 inst
.instruction
|= inst
.operands
[0].reg
;
8496 /* Arg is an address; this instruction cannot be executed
8497 conditionally, and the opcode must be adjusted.
8498 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8499 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8500 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8501 inst
.instruction
= 0xfa000000;
8502 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8509 bfd_boolean want_reloc
;
8511 if (inst
.operands
[0].reg
== REG_PC
)
8512 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8514 inst
.instruction
|= inst
.operands
[0].reg
;
8515 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8516 it is for ARMv4t or earlier. */
8517 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8518 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
8522 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8527 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
8531 /* ARM v5TEJ. Jump to Jazelle code. */
8536 if (inst
.operands
[0].reg
== REG_PC
)
8537 as_tsktsk (_("use of r15 in bxj is not really useful"));
8539 inst
.instruction
|= inst
.operands
[0].reg
;
/* NOTE(review): lossy extraction -- function header/braces dropped,
   statements split across lines, original line numbers embedded.  Code
   kept byte-identical; only comments added.  */
/* Encode CDP/CDP2: coprocessor number goes in bits 8-11, opcode_1 in
   bits 20-23, CRd in bits 12-15, CRn in bits 16-19, CRm in bits 0-3,
   opcode_2 in bits 5-7.  */
8542 /* Co-processor data operation:
8543 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8544 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8548 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8549 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8550 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8551 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8552 inst
.instruction
|= inst
.operands
[4].reg
;
8553 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8559 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8560 encode_arm_shifter_operand (1);
8563 /* Transfer between coprocessor and ARM registers.
8564 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8569 No special properties. */
8571 struct deprecated_coproc_regs_s
8578 arm_feature_set deprecated
;
8579 arm_feature_set obsoleted
;
8580 const char *dep_msg
;
8581 const char *obs_msg
;
8584 #define DEPR_ACCESS_V8 \
8585 N_("This coprocessor register access is deprecated in ARMv8")
8587 /* Table of all deprecated coprocessor registers. */
8588 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8590 {15, 0, 7, 10, 5, /* CP15DMB. */
8591 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8592 DEPR_ACCESS_V8
, NULL
},
8593 {15, 0, 7, 10, 4, /* CP15DSB. */
8594 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8595 DEPR_ACCESS_V8
, NULL
},
8596 {15, 0, 7, 5, 4, /* CP15ISB. */
8597 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8598 DEPR_ACCESS_V8
, NULL
},
8599 {14, 6, 1, 0, 0, /* TEEHBR. */
8600 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8601 DEPR_ACCESS_V8
, NULL
},
8602 {14, 6, 0, 0, 0, /* TEECR. */
8603 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8604 DEPR_ACCESS_V8
, NULL
},
8607 #undef DEPR_ACCESS_V8
8609 static const size_t deprecated_coproc_reg_count
=
8610 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8618 Rd
= inst
.operands
[2].reg
;
8621 if (inst
.instruction
== 0xee000010
8622 || inst
.instruction
== 0xfe000010)
8624 reject_bad_reg (Rd
);
8627 constraint (Rd
== REG_SP
, BAD_SP
);
8632 if (inst
.instruction
== 0xe000010)
8633 constraint (Rd
== REG_PC
, BAD_PC
);
8636 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8638 const struct deprecated_coproc_regs_s
*r
=
8639 deprecated_coproc_regs
+ i
;
8641 if (inst
.operands
[0].reg
== r
->cp
8642 && inst
.operands
[1].imm
== r
->opc1
8643 && inst
.operands
[3].reg
== r
->crn
8644 && inst
.operands
[4].reg
== r
->crm
8645 && inst
.operands
[5].imm
== r
->opc2
)
8647 if (! ARM_CPU_IS_ANY (cpu_variant
)
8648 && warn_on_deprecated
8649 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8650 as_tsktsk ("%s", r
->dep_msg
);
8654 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8655 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8656 inst
.instruction
|= Rd
<< 12;
8657 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8658 inst
.instruction
|= inst
.operands
[4].reg
;
8659 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8662 /* Transfer between coprocessor register and pair of ARM registers.
8663 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8668 Two XScale instructions are special cases of these:
8670 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8671 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8673 Result unpredictable if Rd or Rn is R15. */
8680 Rd
= inst
.operands
[2].reg
;
8681 Rn
= inst
.operands
[3].reg
;
8685 reject_bad_reg (Rd
);
8686 reject_bad_reg (Rn
);
8690 constraint (Rd
== REG_PC
, BAD_PC
);
8691 constraint (Rn
== REG_PC
, BAD_PC
);
8694 /* Only check the MRRC{2} variants. */
8695 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
8697 /* If Rd == Rn, error that the operation is
8698 unpredictable (example MRRC p3,#1,r1,r1,c4). */
8699 constraint (Rd
== Rn
, BAD_OVERLAP
);
8702 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8703 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
8704 inst
.instruction
|= Rd
<< 12;
8705 inst
.instruction
|= Rn
<< 16;
8706 inst
.instruction
|= inst
.operands
[4].reg
;
8712 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8713 if (inst
.operands
[1].present
)
8715 inst
.instruction
|= CPSI_MMOD
;
8716 inst
.instruction
|= inst
.operands
[1].imm
;
8723 inst
.instruction
|= inst
.operands
[0].imm
;
8729 unsigned Rd
, Rn
, Rm
;
8731 Rd
= inst
.operands
[0].reg
;
8732 Rn
= (inst
.operands
[1].present
8733 ? inst
.operands
[1].reg
: Rd
);
8734 Rm
= inst
.operands
[2].reg
;
8736 constraint ((Rd
== REG_PC
), BAD_PC
);
8737 constraint ((Rn
== REG_PC
), BAD_PC
);
8738 constraint ((Rm
== REG_PC
), BAD_PC
);
8740 inst
.instruction
|= Rd
<< 16;
8741 inst
.instruction
|= Rn
<< 0;
8742 inst
.instruction
|= Rm
<< 8;
8748 /* There is no IT instruction in ARM mode. We
8749 process it to do the validation as if in
8750 thumb mode, just in case the code gets
8751 assembled for thumb using the unified syntax. */
8756 set_it_insn_type (IT_INSN
);
8757 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
8758 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.

   RANGE is a 16-bit register bitmask (bit N set means rN is in the
   list).  Returns N when exactly one of bits 0-15 is set; -1 for an
   empty mask, more than one register, or a set bit above r15.

   Fix: the original computed i = ffs (range) - 1 and then evaluated
   range != (1 << i) even for RANGE == 0, where i == -1 and shifting
   by a negative count is undefined behaviour.  Guard the empty mask
   explicitly before shifting.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  if (range == 0)
    return -1;			/* Empty list: not a single register.  */

  i = ffs (range) - 1;		/* Index of the lowest set bit.  */
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
8772 encode_ldmstm(int from_push_pop_mnem
)
8774 int base_reg
= inst
.operands
[0].reg
;
8775 int range
= inst
.operands
[1].imm
;
8778 inst
.instruction
|= base_reg
<< 16;
8779 inst
.instruction
|= range
;
8781 if (inst
.operands
[1].writeback
)
8782 inst
.instruction
|= LDM_TYPE_2_OR_3
;
8784 if (inst
.operands
[0].writeback
)
8786 inst
.instruction
|= WRITE_BACK
;
8787 /* Check for unpredictable uses of writeback. */
8788 if (inst
.instruction
& LOAD_BIT
)
8790 /* Not allowed in LDM type 2. */
8791 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
8792 && ((range
& (1 << REG_PC
)) == 0))
8793 as_warn (_("writeback of base register is UNPREDICTABLE"));
8794 /* Only allowed if base reg not in list for other types. */
8795 else if (range
& (1 << base_reg
))
8796 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8800 /* Not allowed for type 2. */
8801 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
8802 as_warn (_("writeback of base register is UNPREDICTABLE"));
8803 /* Only allowed if base reg not in list, or first in list. */
8804 else if ((range
& (1 << base_reg
))
8805 && (range
& ((1 << base_reg
) - 1)))
8806 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8810 /* If PUSH/POP has only one register, then use the A2 encoding. */
8811 one_reg
= only_one_reg_in_list (range
);
8812 if (from_push_pop_mnem
&& one_reg
>= 0)
8814 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
8816 inst
.instruction
&= A_COND_MASK
;
8817 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
8818 inst
.instruction
|= one_reg
<< 12;
8825 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
8828 /* ARMv5TE load-consecutive (argument parse)
8837 constraint (inst
.operands
[0].reg
% 2 != 0,
8838 _("first transfer register must be even"));
8839 constraint (inst
.operands
[1].present
8840 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8841 _("can only transfer two consecutive registers"));
8842 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8843 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
8845 if (!inst
.operands
[1].present
)
8846 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8848 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8849 register and the first register written; we have to diagnose
8850 overlap between the base and the second register written here. */
8852 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
8853 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
8854 as_warn (_("base register written back, and overlaps "
8855 "second transfer register"));
8857 if (!(inst
.instruction
& V4_STR_BIT
))
8859 /* For an index-register load, the index register must not overlap the
8860 destination (even if not write-back). */
8861 if (inst
.operands
[2].immisreg
8862 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
8863 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
8864 as_warn (_("index register overlaps transfer register"));
8866 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8867 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
8873 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8874 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8875 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8876 || inst
.operands
[1].negative
8877 /* This can arise if the programmer has written
8879 or if they have mistakenly used a register name as the last
8882 It is very difficult to distinguish between these two cases
8883 because "rX" might actually be a label. ie the register
8884 name has been occluded by a symbol of the same name. So we
8885 just generate a general 'bad addressing mode' type error
8886 message and leave it up to the programmer to discover the
8887 true cause and fix their mistake. */
8888 || (inst
.operands
[1].reg
== REG_PC
),
8891 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8892 || inst
.reloc
.exp
.X_add_number
!= 0,
8893 _("offset must be zero in ARM encoding"));
8895 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
8897 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8898 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8899 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8905 constraint (inst
.operands
[0].reg
% 2 != 0,
8906 _("even register required"));
8907 constraint (inst
.operands
[1].present
8908 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8909 _("can only load two consecutive registers"));
8910 /* If op 1 were present and equal to PC, this function wouldn't
8911 have been called in the first place. */
8912 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8914 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8915 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8918 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8919 which is not a multiple of four is UNPREDICTABLE. */
8921 check_ldr_r15_aligned (void)
8923 constraint (!(inst
.operands
[1].immisreg
)
8924 && (inst
.operands
[0].reg
== REG_PC
8925 && inst
.operands
[1].reg
== REG_PC
8926 && (inst
.reloc
.exp
.X_add_number
& 0x3)),
8927 _("ldr to register 15 must be 4-byte alligned"));
8933 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8934 if (!inst
.operands
[1].isreg
)
8935 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
8937 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
8938 check_ldr_r15_aligned ();
8944 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8946 if (inst
.operands
[1].preind
)
8948 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8949 || inst
.reloc
.exp
.X_add_number
!= 0,
8950 _("this instruction requires a post-indexed address"));
8952 inst
.operands
[1].preind
= 0;
8953 inst
.operands
[1].postind
= 1;
8954 inst
.operands
[1].writeback
= 1;
8956 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8957 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
8960 /* Halfword and signed-byte load/store operations. */
8965 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
8966 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8967 if (!inst
.operands
[1].isreg
)
8968 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
8970 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
8976 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8978 if (inst
.operands
[1].preind
)
8980 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8981 || inst
.reloc
.exp
.X_add_number
!= 0,
8982 _("this instruction requires a post-indexed address"));
8984 inst
.operands
[1].preind
= 0;
8985 inst
.operands
[1].postind
= 1;
8986 inst
.operands
[1].writeback
= 1;
8988 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8989 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
8992 /* Co-processor register load/store.
8993 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8997 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8998 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8999 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9005 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9006 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9007 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9008 && !(inst
.instruction
& 0x00400000))
9009 as_tsktsk (_("Rd and Rm should be different in mla"));
9011 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9012 inst
.instruction
|= inst
.operands
[1].reg
;
9013 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9014 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9020 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9021 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9023 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9024 encode_arm_shifter_operand (1);
9027 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9034 top
= (inst
.instruction
& 0x00400000) != 0;
9035 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
9036 _(":lower16: not allowed this instruction"));
9037 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
9038 _(":upper16: not allowed instruction"));
9039 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9040 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
9042 imm
= inst
.reloc
.exp
.X_add_number
;
9043 /* The value is in two pieces: 0:11, 16:19. */
9044 inst
.instruction
|= (imm
& 0x00000fff);
9045 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9050 do_vfp_nsyn_mrs (void)
9052 if (inst
.operands
[0].isvec
)
9054 if (inst
.operands
[1].reg
!= 1)
9055 first_error (_("operand 1 must be FPSCR"));
9056 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9057 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9058 do_vfp_nsyn_opcode ("fmstat");
9060 else if (inst
.operands
[1].isvec
)
9061 do_vfp_nsyn_opcode ("fmrx");
9069 do_vfp_nsyn_msr (void)
9071 if (inst
.operands
[0].isvec
)
9072 do_vfp_nsyn_opcode ("fmxr");
9082 unsigned Rt
= inst
.operands
[0].reg
;
9084 if (thumb_mode
&& Rt
== REG_SP
)
9086 inst
.error
= BAD_SP
;
9090 /* APSR_ sets isvec. All other refs to PC are illegal. */
9091 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9093 inst
.error
= BAD_PC
;
9097 /* If we get through parsing the register name, we just insert the number
9098 generated into the instruction without further validation. */
9099 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9100 inst
.instruction
|= (Rt
<< 12);
9106 unsigned Rt
= inst
.operands
[1].reg
;
9109 reject_bad_reg (Rt
);
9110 else if (Rt
== REG_PC
)
9112 inst
.error
= BAD_PC
;
9116 /* If we get through parsing the register name, we just insert the number
9117 generated into the instruction without further validation. */
9118 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9119 inst
.instruction
|= (Rt
<< 12);
9127 if (do_vfp_nsyn_mrs () == SUCCESS
)
9130 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9131 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9133 if (inst
.operands
[1].isreg
)
9135 br
= inst
.operands
[1].reg
;
9136 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf000))
9137 as_bad (_("bad register for mrs"));
9141 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9142 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9144 _("'APSR', 'CPSR' or 'SPSR' expected"));
9145 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9148 inst
.instruction
|= br
;
9151 /* Two possible forms:
9152 "{C|S}PSR_<field>, Rm",
9153 "{C|S}PSR_f, #expression". */
9158 if (do_vfp_nsyn_msr () == SUCCESS
)
9161 inst
.instruction
|= inst
.operands
[0].imm
;
9162 if (inst
.operands
[1].isreg
)
9163 inst
.instruction
|= inst
.operands
[1].reg
;
9166 inst
.instruction
|= INST_IMMEDIATE
;
9167 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
9168 inst
.reloc
.pc_rel
= 0;
9175 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9177 if (!inst
.operands
[2].present
)
9178 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9179 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9180 inst
.instruction
|= inst
.operands
[1].reg
;
9181 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9183 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9184 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9185 as_tsktsk (_("Rd and Rm should be different in mul"));
9188 /* Long Multiply Parser
9189 UMULL RdLo, RdHi, Rm, Rs
9190 SMULL RdLo, RdHi, Rm, Rs
9191 UMLAL RdLo, RdHi, Rm, Rs
9192 SMLAL RdLo, RdHi, Rm, Rs. */
9197 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9198 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9199 inst
.instruction
|= inst
.operands
[2].reg
;
9200 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9202 /* rdhi and rdlo must be different. */
9203 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9204 as_tsktsk (_("rdhi and rdlo must be different"));
9206 /* rdhi, rdlo and rm must all be different before armv6. */
9207 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9208 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9209 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9210 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9216 if (inst
.operands
[0].present
9217 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9219 /* Architectural NOP hints are CPSR sets with no bits selected. */
9220 inst
.instruction
&= 0xf0000000;
9221 inst
.instruction
|= 0x0320f000;
9222 if (inst
.operands
[0].present
)
9223 inst
.instruction
|= inst
.operands
[0].imm
;
9227 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9228 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9229 Condition defaults to COND_ALWAYS.
9230 Error if Rd, Rn or Rm are R15. */
9235 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9236 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9237 inst
.instruction
|= inst
.operands
[2].reg
;
9238 if (inst
.operands
[3].present
)
9239 encode_arm_shift (3);
9242 /* ARM V6 PKHTB (Argument Parse). */
9247 if (!inst
.operands
[3].present
)
9249 /* If the shift specifier is omitted, turn the instruction
9250 into pkhbt rd, rm, rn. */
9251 inst
.instruction
&= 0xfff00010;
9252 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9253 inst
.instruction
|= inst
.operands
[1].reg
;
9254 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9258 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9259 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9260 inst
.instruction
|= inst
.operands
[2].reg
;
9261 encode_arm_shift (3);
9265 /* ARMv5TE: Preload-Cache
9266 MP Extensions: Preload for write
9270 Syntactically, like LDR with B=1, W=0, L=1. */
9275 constraint (!inst
.operands
[0].isreg
,
9276 _("'[' expected after PLD mnemonic"));
9277 constraint (inst
.operands
[0].postind
,
9278 _("post-indexed expression used in preload instruction"));
9279 constraint (inst
.operands
[0].writeback
,
9280 _("writeback used in preload instruction"));
9281 constraint (!inst
.operands
[0].preind
,
9282 _("unindexed addressing used in preload instruction"));
9283 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9286 /* ARMv7: PLI <addr_mode> */
9290 constraint (!inst
.operands
[0].isreg
,
9291 _("'[' expected after PLI mnemonic"));
9292 constraint (inst
.operands
[0].postind
,
9293 _("post-indexed expression used in preload instruction"));
9294 constraint (inst
.operands
[0].writeback
,
9295 _("writeback used in preload instruction"));
9296 constraint (!inst
.operands
[0].preind
,
9297 _("unindexed addressing used in preload instruction"));
9298 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9299 inst
.instruction
&= ~PRE_INDEX
;
9305 constraint (inst
.operands
[0].writeback
,
9306 _("push/pop do not support {reglist}^"));
9307 inst
.operands
[1] = inst
.operands
[0];
9308 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9309 inst
.operands
[0].isreg
= 1;
9310 inst
.operands
[0].writeback
= 1;
9311 inst
.operands
[0].reg
= REG_SP
;
9312 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9315 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9316 word at the specified address and the following word
9318 Unconditionally executed.
9319 Error if Rn is R15. */
9324 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9325 if (inst
.operands
[0].writeback
)
9326 inst
.instruction
|= WRITE_BACK
;
9329 /* ARM V6 ssat (argument parse). */
9334 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9335 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9336 inst
.instruction
|= inst
.operands
[2].reg
;
9338 if (inst
.operands
[3].present
)
9339 encode_arm_shift (3);
9342 /* ARM V6 usat (argument parse). */
9347 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9348 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9349 inst
.instruction
|= inst
.operands
[2].reg
;
9351 if (inst
.operands
[3].present
)
9352 encode_arm_shift (3);
9355 /* ARM V6 ssat16 (argument parse). */
9360 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9361 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9362 inst
.instruction
|= inst
.operands
[2].reg
;
9368 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9369 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9370 inst
.instruction
|= inst
.operands
[2].reg
;
9373 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9374 preserving the other bits.
9376 setend <endian_specifier>, where <endian_specifier> is either
9382 if (warn_on_deprecated
9383 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9384 as_tsktsk (_("setend use is deprecated for ARMv8"));
9386 if (inst
.operands
[0].imm
)
9387 inst
.instruction
|= 0x200;
9393 unsigned int Rm
= (inst
.operands
[1].present
9394 ? inst
.operands
[1].reg
9395 : inst
.operands
[0].reg
);
9397 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9398 inst
.instruction
|= Rm
;
9399 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9401 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9402 inst
.instruction
|= SHIFT_BY_REG
;
9403 /* PR 12854: Error on extraneous shifts. */
9404 constraint (inst
.operands
[2].shifted
,
9405 _("extraneous shift as part of operand to shift insn"));
9408 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
9414 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
9415 inst
.reloc
.pc_rel
= 0;
9421 inst
.reloc
.type
= BFD_RELOC_ARM_HVC
;
9422 inst
.reloc
.pc_rel
= 0;
9428 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9429 inst
.reloc
.pc_rel
= 0;
9435 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9436 _("selected processor does not support SETPAN instruction"));
9438 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9444 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9445 _("selected processor does not support SETPAN instruction"));
9447 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9450 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9451 SMLAxy{cond} Rd,Rm,Rs,Rn
9452 SMLAWy{cond} Rd,Rm,Rs,Rn
9453 Error if any register is R15. */
9458 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9459 inst
.instruction
|= inst
.operands
[1].reg
;
9460 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9461 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9464 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9465 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9466 Error if any register is R15.
9467 Warning if Rdlo == Rdhi. */
9472 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9473 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9474 inst
.instruction
|= inst
.operands
[2].reg
;
9475 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9477 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9478 as_tsktsk (_("rdhi and rdlo must be different"));
9481 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9482 SMULxy{cond} Rd,Rm,Rs
9483 Error if any register is R15. */
9488 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9489 inst
.instruction
|= inst
.operands
[1].reg
;
9490 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9493 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9494 the same for both ARM and Thumb-2. */
9501 if (inst
.operands
[0].present
)
9503 reg
= inst
.operands
[0].reg
;
9504 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9509 inst
.instruction
|= reg
<< 16;
9510 inst
.instruction
|= inst
.operands
[1].imm
;
9511 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9512 inst
.instruction
|= WRITE_BACK
;
9515 /* ARM V6 strex (argument parse). */
9520 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9521 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9522 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9523 || inst
.operands
[2].negative
9524 /* See comment in do_ldrex(). */
9525 || (inst
.operands
[2].reg
== REG_PC
),
9528 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9529 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9531 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9532 || inst
.reloc
.exp
.X_add_number
!= 0,
9533 _("offset must be zero in ARM encoding"));
9535 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9536 inst
.instruction
|= inst
.operands
[1].reg
;
9537 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9538 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9544 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9545 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9546 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9547 || inst
.operands
[2].negative
,
9550 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9551 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9559 constraint (inst
.operands
[1].reg
% 2 != 0,
9560 _("even register required"));
9561 constraint (inst
.operands
[2].present
9562 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9563 _("can only store two consecutive registers"));
9564 /* If op 2 were present and equal to PC, this function wouldn't
9565 have been called in the first place. */
9566 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9568 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9569 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9570 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9573 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9574 inst
.instruction
|= inst
.operands
[1].reg
;
9575 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9582 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9583 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9591 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9592 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9597 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9598 extends it to 32-bits, and adds the result to a value in another
9599 register. You can specify a rotation by 0, 8, 16, or 24 bits
9600 before extracting the 16-bit value.
9601 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9602 Condition defaults to COND_ALWAYS.
9603 Error if any register uses R15. */
9608 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9609 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9610 inst
.instruction
|= inst
.operands
[2].reg
;
9611 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9616 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9617 Condition defaults to COND_ALWAYS.
9618 Error if any register uses R15. */
9623 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9624 inst
.instruction
|= inst
.operands
[1].reg
;
9625 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9628 /* VFP instructions. In a logical order: SP variant first, monad
9629 before dyad, arithmetic then move then load/store. */
/* NOTE(review): lossy extraction -- the "static void" line and braces
   were dropped, statements split across lines, original line numbers
   embedded.  Code kept byte-identical; only comments added.  */
/* Encode a single-precision VFP monadic operation: operand 0 is the
   destination (Sd field), operand 1 the source (Sm field).  */
9632 do_vfp_sp_monadic (void)
9634 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9635 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
/* NOTE(review): lossy extraction -- the "static void" line and braces
   were dropped, statements split across lines, original line numbers
   embedded.  Code kept byte-identical; only comments added.  */
/* Encode a single-precision VFP dyadic operation: operand 0 is the
   destination (Sd), operand 1 the first source (Sn), operand 2 the
   second source (Sm).  */
9639 do_vfp_sp_dyadic (void)
9641 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9642 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9643 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9647 do_vfp_sp_compare_z (void)
9649 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9653 do_vfp_dp_sp_cvt (void)
9655 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9656 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9660 do_vfp_sp_dp_cvt (void)
9662 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9663 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9667 do_vfp_reg_from_sp (void)
9669 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9670 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9674 do_vfp_reg2_from_sp2 (void)
9676 constraint (inst
.operands
[2].imm
!= 2,
9677 _("only two consecutive VFP SP registers allowed here"));
9678 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9679 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9680 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9684 do_vfp_sp_from_reg (void)
9686 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9687 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9691 do_vfp_sp2_from_reg2 (void)
9693 constraint (inst
.operands
[0].imm
!= 2,
9694 _("only two consecutive VFP SP registers allowed here"));
9695 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9696 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9697 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9701 do_vfp_sp_ldst (void)
9703 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9704 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9708 do_vfp_dp_ldst (void)
9710 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9711 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9716 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
9718 if (inst
.operands
[0].writeback
)
9719 inst
.instruction
|= WRITE_BACK
;
9721 constraint (ldstm_type
!= VFP_LDSTMIA
,
9722 _("this addressing mode requires base-register writeback"));
9723 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9724 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
9725 inst
.instruction
|= inst
.operands
[1].imm
;
9729 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
9733 if (inst
.operands
[0].writeback
)
9734 inst
.instruction
|= WRITE_BACK
;
9736 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
9737 _("this addressing mode requires base-register writeback"));
9739 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9740 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9742 count
= inst
.operands
[1].imm
<< 1;
9743 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
9746 inst
.instruction
|= count
;
/* NOTE(review): lossy extraction -- "static void" lines and braces were
   dropped and statements are split across lines with original line
   numbers embedded.  Code kept byte-identical; only comments added.  */
/* Thin wrappers dispatching the six VFP load/store-multiple forms to
   the shared vfp_sp_ldstm/vfp_dp_ldstm encoders with the appropriate
   addressing-mode enumerator (IA, DB, and the FLDMX/FSTMX "X"
   variants for the xp forms).  */
9750 do_vfp_sp_ldstmia (void)
9752 vfp_sp_ldstm (VFP_LDSTMIA
);
9756 do_vfp_sp_ldstmdb (void)
9758 vfp_sp_ldstm (VFP_LDSTMDB
);
9762 do_vfp_dp_ldstmia (void)
9764 vfp_dp_ldstm (VFP_LDSTMIA
);
9768 do_vfp_dp_ldstmdb (void)
9770 vfp_dp_ldstm (VFP_LDSTMDB
);
9774 do_vfp_xp_ldstmia (void)
9776 vfp_dp_ldstm (VFP_LDSTMIAX
);
9780 do_vfp_xp_ldstmdb (void)
9782 vfp_dp_ldstm (VFP_LDSTMDBX
);
9786 do_vfp_dp_rd_rm (void)
9788 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9789 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9793 do_vfp_dp_rn_rd (void)
9795 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
9796 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9800 do_vfp_dp_rd_rn (void)
9802 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9803 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9807 do_vfp_dp_rd_rn_rm (void)
9809 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9810 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9811 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
9817 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9821 do_vfp_dp_rm_rd_rn (void)
9823 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
9824 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9825 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
9828 /* VFPv3 instructions. */
9830 do_vfp_sp_const (void)
9832 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9833 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9834 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9838 do_vfp_dp_const (void)
9840 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9841 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9842 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9846 vfp_conv (int srcsize
)
9848 int immbits
= srcsize
- inst
.operands
[1].imm
;
9850 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
9852 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9853 i.e. immbits must be in range 0 - 16. */
9854 inst
.error
= _("immediate value out of range, expected range [0, 16]");
9857 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
9859 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9860 i.e. immbits must be in range 0 - 31. */
9861 inst
.error
= _("immediate value out of range, expected range [1, 32]");
9865 inst
.instruction
|= (immbits
& 1) << 5;
9866 inst
.instruction
|= (immbits
>> 1);
9870 do_vfp_sp_conv_16 (void)
9872 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9877 do_vfp_dp_conv_16 (void)
9879 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9884 do_vfp_sp_conv_32 (void)
9886 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9891 do_vfp_dp_conv_32 (void)
9893 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9897 /* FPA instructions. Also in a logical order. */
9902 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9903 inst
.instruction
|= inst
.operands
[1].reg
;
9907 do_fpa_ldmstm (void)
9909 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9910 switch (inst
.operands
[1].imm
)
9912 case 1: inst
.instruction
|= CP_T_X
; break;
9913 case 2: inst
.instruction
|= CP_T_Y
; break;
9914 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
9919 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
9921 /* The instruction specified "ea" or "fd", so we can only accept
9922 [Rn]{!}. The instruction does not really support stacking or
9923 unstacking, so we have to emulate these by setting appropriate
9924 bits and offsets. */
9925 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9926 || inst
.reloc
.exp
.X_add_number
!= 0,
9927 _("this instruction does not support indexing"));
9929 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
9930 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
9932 if (!(inst
.instruction
& INDEX_UP
))
9933 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
9935 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
9937 inst
.operands
[2].preind
= 0;
9938 inst
.operands
[2].postind
= 1;
9942 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9945 /* iWMMXt instructions: strictly in alphabetical order. */
9948 do_iwmmxt_tandorc (void)
9950 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
9954 do_iwmmxt_textrc (void)
9956 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9957 inst
.instruction
|= inst
.operands
[1].imm
;
9961 do_iwmmxt_textrm (void)
9963 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9964 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9965 inst
.instruction
|= inst
.operands
[2].imm
;
9969 do_iwmmxt_tinsr (void)
9971 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9972 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9973 inst
.instruction
|= inst
.operands
[2].imm
;
9977 do_iwmmxt_tmia (void)
9979 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
9980 inst
.instruction
|= inst
.operands
[1].reg
;
9981 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9985 do_iwmmxt_waligni (void)
9987 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9988 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9989 inst
.instruction
|= inst
.operands
[2].reg
;
9990 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
9994 do_iwmmxt_wmerge (void)
9996 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9997 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9998 inst
.instruction
|= inst
.operands
[2].reg
;
9999 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
10003 do_iwmmxt_wmov (void)
10005 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10006 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10007 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10008 inst
.instruction
|= inst
.operands
[1].reg
;
10012 do_iwmmxt_wldstbh (void)
10015 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10017 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
10019 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
10020 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
10024 do_iwmmxt_wldstw (void)
10026 /* RIWR_RIWC clears .isreg for a control register. */
10027 if (!inst
.operands
[0].isreg
)
10029 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
10030 inst
.instruction
|= 0xf0000000;
10033 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10034 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
10038 do_iwmmxt_wldstd (void)
10040 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10041 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
10042 && inst
.operands
[1].immisreg
)
10044 inst
.instruction
&= ~0x1a000ff;
10045 inst
.instruction
|= (0xfU
<< 28);
10046 if (inst
.operands
[1].preind
)
10047 inst
.instruction
|= PRE_INDEX
;
10048 if (!inst
.operands
[1].negative
)
10049 inst
.instruction
|= INDEX_UP
;
10050 if (inst
.operands
[1].writeback
)
10051 inst
.instruction
|= WRITE_BACK
;
10052 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10053 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10054 inst
.instruction
|= inst
.operands
[1].imm
;
10057 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10061 do_iwmmxt_wshufh (void)
10063 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10064 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10065 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10066 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10070 do_iwmmxt_wzero (void)
10072 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10073 inst
.instruction
|= inst
.operands
[0].reg
;
10074 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10075 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10079 do_iwmmxt_wrwrwr_or_imm5 (void)
10081 if (inst
.operands
[2].isreg
)
10084 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10085 _("immediate operand requires iWMMXt2"));
10087 if (inst
.operands
[2].imm
== 0)
10089 switch ((inst
.instruction
>> 20) & 0xf)
10095 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10096 inst
.operands
[2].imm
= 16;
10097 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10103 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10104 inst
.operands
[2].imm
= 32;
10105 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10112 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10114 wrn
= (inst
.instruction
>> 16) & 0xf;
10115 inst
.instruction
&= 0xff0fff0f;
10116 inst
.instruction
|= wrn
;
10117 /* Bail out here; the instruction is now assembled. */
10122 /* Map 32 -> 0, etc. */
10123 inst
.operands
[2].imm
&= 0x1f;
10124 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10128 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10129 operations first, then control, shift, and load/store. */
10131 /* Insns like "foo X,Y,Z". */
10134 do_mav_triple (void)
10136 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10137 inst
.instruction
|= inst
.operands
[1].reg
;
10138 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10141 /* Insns like "foo W,X,Y,Z".
10142 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10147 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10148 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10149 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10150 inst
.instruction
|= inst
.operands
[3].reg
;
10153 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10155 do_mav_dspsc (void)
10157 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10160 /* Maverick shift immediate instructions.
10161 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10162 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10165 do_mav_shift (void)
10167 int imm
= inst
.operands
[2].imm
;
10169 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10170 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10172 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10173 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10174 Bit 4 should be 0. */
10175 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10177 inst
.instruction
|= imm
;
10180 /* XScale instructions. Also sorted arithmetic before move. */
10182 /* Xscale multiply-accumulate (argument parse)
10185 MIAxycc acc0,Rm,Rs. */
10190 inst
.instruction
|= inst
.operands
[1].reg
;
10191 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10194 /* Xscale move-accumulator-register (argument parse)
10196 MARcc acc0,RdLo,RdHi. */
10201 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10202 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10205 /* Xscale move-register-accumulator (argument parse)
10207 MRAcc RdLo,RdHi,acc0. */
10212 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10213 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10214 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10217 /* Encoding functions relevant only to Thumb. */
10219 /* inst.operands[i] is a shifted-register operand; encode
10220 it into inst.instruction in the format used by Thumb32. */
10223 encode_thumb32_shifted_operand (int i
)
10225 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10226 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10228 constraint (inst
.operands
[i
].immisreg
,
10229 _("shift by register not allowed in thumb mode"));
10230 inst
.instruction
|= inst
.operands
[i
].reg
;
10231 if (shift
== SHIFT_RRX
)
10232 inst
.instruction
|= SHIFT_ROR
<< 4;
10235 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10236 _("expression too complex"));
10238 constraint (value
> 32
10239 || (value
== 32 && (shift
== SHIFT_LSL
10240 || shift
== SHIFT_ROR
)),
10241 _("shift expression is too large"));
10245 else if (value
== 32)
10248 inst
.instruction
|= shift
<< 4;
10249 inst
.instruction
|= (value
& 0x1c) << 10;
10250 inst
.instruction
|= (value
& 0x03) << 6;
10255 /* inst.operands[i] was set up by parse_address. Encode it into a
10256 Thumb32 format load or store instruction. Reject forms that cannot
10257 be used with such instructions. If is_t is true, reject forms that
10258 cannot be used with a T instruction; if is_d is true, reject forms
10259 that cannot be used with a D instruction. If it is a store insn,
10260 reject PC in Rn. */
10263 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10265 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10267 constraint (!inst
.operands
[i
].isreg
,
10268 _("Instruction does not support =N addresses"));
10270 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10271 if (inst
.operands
[i
].immisreg
)
10273 constraint (is_pc
, BAD_PC_ADDRESSING
);
10274 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10275 constraint (inst
.operands
[i
].negative
,
10276 _("Thumb does not support negative register indexing"));
10277 constraint (inst
.operands
[i
].postind
,
10278 _("Thumb does not support register post-indexing"));
10279 constraint (inst
.operands
[i
].writeback
,
10280 _("Thumb does not support register indexing with writeback"));
10281 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10282 _("Thumb supports only LSL in shifted register indexing"));
10284 inst
.instruction
|= inst
.operands
[i
].imm
;
10285 if (inst
.operands
[i
].shifted
)
10287 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10288 _("expression too complex"));
10289 constraint (inst
.reloc
.exp
.X_add_number
< 0
10290 || inst
.reloc
.exp
.X_add_number
> 3,
10291 _("shift out of range"));
10292 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10294 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10296 else if (inst
.operands
[i
].preind
)
10298 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10299 constraint (is_t
&& inst
.operands
[i
].writeback
,
10300 _("cannot use writeback with this instruction"));
10301 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10302 BAD_PC_ADDRESSING
);
10306 inst
.instruction
|= 0x01000000;
10307 if (inst
.operands
[i
].writeback
)
10308 inst
.instruction
|= 0x00200000;
10312 inst
.instruction
|= 0x00000c00;
10313 if (inst
.operands
[i
].writeback
)
10314 inst
.instruction
|= 0x00000100;
10316 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10318 else if (inst
.operands
[i
].postind
)
10320 gas_assert (inst
.operands
[i
].writeback
);
10321 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10322 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10325 inst
.instruction
|= 0x00200000;
10327 inst
.instruction
|= 0x00000900;
10328 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10330 else /* unindexed - only for coprocessor */
10331 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
10440 /* Thumb instruction encoders, in alphabetical order. */
10442 /* ADDW or SUBW. */
10445 do_t_add_sub_w (void)
10449 Rd
= inst
.operands
[0].reg
;
10450 Rn
= inst
.operands
[1].reg
;
10452 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10453 is the SP-{plus,minus}-immediate form of the instruction. */
10455 constraint (Rd
== REG_PC
, BAD_PC
);
10457 reject_bad_reg (Rd
);
10459 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10460 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10463 /* Parse an add or subtract instruction. We get here with inst.instruction
10464 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10467 do_t_add_sub (void)
10471 Rd
= inst
.operands
[0].reg
;
10472 Rs
= (inst
.operands
[1].present
10473 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10474 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10477 set_it_insn_type_last ();
10479 if (unified_syntax
)
10482 bfd_boolean narrow
;
10485 flags
= (inst
.instruction
== T_MNEM_adds
10486 || inst
.instruction
== T_MNEM_subs
);
10488 narrow
= !in_it_block ();
10490 narrow
= in_it_block ();
10491 if (!inst
.operands
[2].isreg
)
10495 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10497 add
= (inst
.instruction
== T_MNEM_add
10498 || inst
.instruction
== T_MNEM_adds
);
10500 if (inst
.size_req
!= 4)
10502 /* Attempt to use a narrow opcode, with relaxation if
10504 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10505 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10506 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10507 opcode
= T_MNEM_add_sp
;
10508 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10509 opcode
= T_MNEM_add_pc
;
10510 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10513 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10515 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10519 inst
.instruction
= THUMB_OP16(opcode
);
10520 inst
.instruction
|= (Rd
<< 4) | Rs
;
10521 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10522 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
10524 if (inst
.size_req
== 2)
10525 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10527 inst
.relax
= opcode
;
10531 constraint (inst
.size_req
== 2, BAD_HIREG
);
10533 if (inst
.size_req
== 4
10534 || (inst
.size_req
!= 2 && !opcode
))
10536 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10537 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
10538 THUMB1_RELOC_ONLY
);
10541 constraint (add
, BAD_PC
);
10542 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10543 _("only SUBS PC, LR, #const allowed"));
10544 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10545 _("expression too complex"));
10546 constraint (inst
.reloc
.exp
.X_add_number
< 0
10547 || inst
.reloc
.exp
.X_add_number
> 0xff,
10548 _("immediate value out of range"));
10549 inst
.instruction
= T2_SUBS_PC_LR
10550 | inst
.reloc
.exp
.X_add_number
;
10551 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10554 else if (Rs
== REG_PC
)
10556 /* Always use addw/subw. */
10557 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10558 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10562 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10563 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10566 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10568 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10570 inst
.instruction
|= Rd
<< 8;
10571 inst
.instruction
|= Rs
<< 16;
10576 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10577 unsigned int shift
= inst
.operands
[2].shift_kind
;
10579 Rn
= inst
.operands
[2].reg
;
10580 /* See if we can do this with a 16-bit instruction. */
10581 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10583 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10588 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10589 || inst
.instruction
== T_MNEM_add
)
10591 : T_OPCODE_SUB_R3
);
10592 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10596 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10598 /* Thumb-1 cores (except v6-M) require at least one high
10599 register in a narrow non flag setting add. */
10600 if (Rd
> 7 || Rn
> 7
10601 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10602 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10609 inst
.instruction
= T_OPCODE_ADD_HI
;
10610 inst
.instruction
|= (Rd
& 8) << 4;
10611 inst
.instruction
|= (Rd
& 7);
10612 inst
.instruction
|= Rn
<< 3;
10618 constraint (Rd
== REG_PC
, BAD_PC
);
10619 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10620 constraint (Rs
== REG_PC
, BAD_PC
);
10621 reject_bad_reg (Rn
);
10623 /* If we get here, it can't be done in 16 bits. */
10624 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10625 _("shift must be constant"));
10626 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10627 inst
.instruction
|= Rd
<< 8;
10628 inst
.instruction
|= Rs
<< 16;
10629 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10630 _("shift value over 3 not allowed in thumb mode"));
10631 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10632 _("only LSL shift allowed in thumb mode"));
10633 encode_thumb32_shifted_operand (2);
10638 constraint (inst
.instruction
== T_MNEM_adds
10639 || inst
.instruction
== T_MNEM_subs
,
10642 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10644 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10645 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10648 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10649 ? 0x0000 : 0x8000);
10650 inst
.instruction
|= (Rd
<< 4) | Rs
;
10651 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10655 Rn
= inst
.operands
[2].reg
;
10656 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10658 /* We now have Rd, Rs, and Rn set to registers. */
10659 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10661 /* Can't do this for SUB. */
10662 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10663 inst
.instruction
= T_OPCODE_ADD_HI
;
10664 inst
.instruction
|= (Rd
& 8) << 4;
10665 inst
.instruction
|= (Rd
& 7);
10667 inst
.instruction
|= Rn
<< 3;
10669 inst
.instruction
|= Rs
<< 3;
10671 constraint (1, _("dest must overlap one source register"));
10675 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10676 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
10677 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10687 Rd
= inst
.operands
[0].reg
;
10688 reject_bad_reg (Rd
);
10690 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
10692 /* Defer to section relaxation. */
10693 inst
.relax
= inst
.instruction
;
10694 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10695 inst
.instruction
|= Rd
<< 4;
10697 else if (unified_syntax
&& inst
.size_req
!= 2)
10699 /* Generate a 32-bit opcode. */
10700 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10701 inst
.instruction
|= Rd
<< 8;
10702 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
10703 inst
.reloc
.pc_rel
= 1;
10707 /* Generate a 16-bit opcode. */
10708 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10709 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10710 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
10711 inst
.reloc
.pc_rel
= 1;
10713 inst
.instruction
|= Rd
<< 4;
10717 /* Arithmetic instructions for which there is just one 16-bit
10718 instruction encoding, and it allows only two low registers.
10719 For maximal compatibility with ARM syntax, we allow three register
10720 operands even when Thumb-32 instructions are not available, as long
10721 as the first two are identical. For instance, both "sbc r0,r1" and
10722 "sbc r0,r0,r1" are allowed. */
10728 Rd
= inst
.operands
[0].reg
;
10729 Rs
= (inst
.operands
[1].present
10730 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10731 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10732 Rn
= inst
.operands
[2].reg
;
10734 reject_bad_reg (Rd
);
10735 reject_bad_reg (Rs
);
10736 if (inst
.operands
[2].isreg
)
10737 reject_bad_reg (Rn
);
10739 if (unified_syntax
)
10741 if (!inst
.operands
[2].isreg
)
10743 /* For an immediate, we always generate a 32-bit opcode;
10744 section relaxation will shrink it later if possible. */
10745 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10746 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10747 inst
.instruction
|= Rd
<< 8;
10748 inst
.instruction
|= Rs
<< 16;
10749 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10753 bfd_boolean narrow
;
10755 /* See if we can do this with a 16-bit instruction. */
10756 if (THUMB_SETS_FLAGS (inst
.instruction
))
10757 narrow
= !in_it_block ();
10759 narrow
= in_it_block ();
10761 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10763 if (inst
.operands
[2].shifted
)
10765 if (inst
.size_req
== 4)
10771 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10772 inst
.instruction
|= Rd
;
10773 inst
.instruction
|= Rn
<< 3;
10777 /* If we get here, it can't be done in 16 bits. */
10778 constraint (inst
.operands
[2].shifted
10779 && inst
.operands
[2].immisreg
,
10780 _("shift must be constant"));
10781 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10782 inst
.instruction
|= Rd
<< 8;
10783 inst
.instruction
|= Rs
<< 16;
10784 encode_thumb32_shifted_operand (2);
10789 /* On its face this is a lie - the instruction does set the
10790 flags. However, the only supported mnemonic in this mode
10791 says it doesn't. */
10792 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10794 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10795 _("unshifted register required"));
10796 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10797 constraint (Rd
!= Rs
,
10798 _("dest and source1 must be the same register"));
10800 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10801 inst
.instruction
|= Rd
;
10802 inst
.instruction
|= Rn
<< 3;
10806 /* Similarly, but for instructions where the arithmetic operation is
10807 commutative, so we can allow either of them to be different from
10808 the destination operand in a 16-bit instruction. For instance, all
10809 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10816 Rd
= inst
.operands
[0].reg
;
10817 Rs
= (inst
.operands
[1].present
10818 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10819 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10820 Rn
= inst
.operands
[2].reg
;
10822 reject_bad_reg (Rd
);
10823 reject_bad_reg (Rs
);
10824 if (inst
.operands
[2].isreg
)
10825 reject_bad_reg (Rn
);
10827 if (unified_syntax
)
10829 if (!inst
.operands
[2].isreg
)
10831 /* For an immediate, we always generate a 32-bit opcode;
10832 section relaxation will shrink it later if possible. */
10833 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10834 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10835 inst
.instruction
|= Rd
<< 8;
10836 inst
.instruction
|= Rs
<< 16;
10837 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10841 bfd_boolean narrow
;
10843 /* See if we can do this with a 16-bit instruction. */
10844 if (THUMB_SETS_FLAGS (inst
.instruction
))
10845 narrow
= !in_it_block ();
10847 narrow
= in_it_block ();
10849 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10851 if (inst
.operands
[2].shifted
)
10853 if (inst
.size_req
== 4)
10860 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10861 inst
.instruction
|= Rd
;
10862 inst
.instruction
|= Rn
<< 3;
10867 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10868 inst
.instruction
|= Rd
;
10869 inst
.instruction
|= Rs
<< 3;
10874 /* If we get here, it can't be done in 16 bits. */
10875 constraint (inst
.operands
[2].shifted
10876 && inst
.operands
[2].immisreg
,
10877 _("shift must be constant"));
10878 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10879 inst
.instruction
|= Rd
<< 8;
10880 inst
.instruction
|= Rs
<< 16;
10881 encode_thumb32_shifted_operand (2);
10886 /* On its face this is a lie - the instruction does set the
10887 flags. However, the only supported mnemonic in this mode
10888 says it doesn't. */
10889 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10891 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10892 _("unshifted register required"));
10893 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10895 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10896 inst
.instruction
|= Rd
;
10899 inst
.instruction
|= Rn
<< 3;
10901 inst
.instruction
|= Rs
<< 3;
10903 constraint (1, _("dest must overlap one source register"));
10911 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
10912 constraint (msb
> 32, _("bit-field extends past end of register"));
10913 /* The instruction encoding stores the LSB and MSB,
10914 not the LSB and width. */
10915 Rd
= inst
.operands
[0].reg
;
10916 reject_bad_reg (Rd
);
10917 inst
.instruction
|= Rd
<< 8;
10918 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
10919 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
10920 inst
.instruction
|= msb
- 1;
10929 Rd
= inst
.operands
[0].reg
;
10930 reject_bad_reg (Rd
);
10932 /* #0 in second position is alternative syntax for bfc, which is
10933 the same instruction but with REG_PC in the Rm field. */
10934 if (!inst
.operands
[1].isreg
)
10938 Rn
= inst
.operands
[1].reg
;
10939 reject_bad_reg (Rn
);
10942 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
10943 constraint (msb
> 32, _("bit-field extends past end of register"));
10944 /* The instruction encoding stores the LSB and MSB,
10945 not the LSB and width. */
10946 inst
.instruction
|= Rd
<< 8;
10947 inst
.instruction
|= Rn
<< 16;
10948 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10949 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10950 inst
.instruction
|= msb
- 1;
10958 Rd
= inst
.operands
[0].reg
;
10959 Rn
= inst
.operands
[1].reg
;
10961 reject_bad_reg (Rd
);
10962 reject_bad_reg (Rn
);
10964 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
10965 _("bit-field extends past end of register"));
10966 inst
.instruction
|= Rd
<< 8;
10967 inst
.instruction
|= Rn
<< 16;
10968 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10969 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10970 inst
.instruction
|= inst
.operands
[3].imm
- 1;
10973 /* ARM V5 Thumb BLX (argument parse)
10974 BLX <target_addr> which is BLX(1)
10975 BLX <Rm> which is BLX(2)
10976 Unfortunately, there are two different opcodes for this mnemonic.
10977 So, the insns[].value is not used, and the code here zaps values
10978 into inst.instruction.
10980 ??? How to take advantage of the additional two bits of displacement
10981 available in Thumb32 mode? Need new relocation? */
10986 set_it_insn_type_last ();
10988 if (inst
.operands
[0].isreg
)
10990 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
10991 /* We have a register, so this is BLX(2). */
10992 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
10996 /* No register. This must be BLX(1). */
10997 inst
.instruction
= 0xf000e800;
10998 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11007 bfd_reloc_code_real_type reloc
;
11010 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
11012 if (in_it_block ())
11014 /* Conditional branches inside IT blocks are encoded as unconditional
11016 cond
= COND_ALWAYS
;
11021 if (cond
!= COND_ALWAYS
)
11022 opcode
= T_MNEM_bcond
;
11024 opcode
= inst
.instruction
;
11027 && (inst
.size_req
== 4
11028 || (inst
.size_req
!= 2
11029 && (inst
.operands
[0].hasreloc
11030 || inst
.reloc
.exp
.X_op
== O_constant
))))
11032 inst
.instruction
= THUMB_OP32(opcode
);
11033 if (cond
== COND_ALWAYS
)
11034 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11037 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11038 _("selected architecture does not support "
11039 "wide conditional branch instruction"));
11041 gas_assert (cond
!= 0xF);
11042 inst
.instruction
|= cond
<< 22;
11043 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11048 inst
.instruction
= THUMB_OP16(opcode
);
11049 if (cond
== COND_ALWAYS
)
11050 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11053 inst
.instruction
|= cond
<< 8;
11054 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11056 /* Allow section relaxation. */
11057 if (unified_syntax
&& inst
.size_req
!= 2)
11058 inst
.relax
= opcode
;
11060 inst
.reloc
.type
= reloc
;
11061 inst
.reloc
.pc_rel
= 1;
11064 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11065 between the two is the maximum immediate allowed - which is passed in
11068 do_t_bkpt_hlt1 (int range
)
11070 constraint (inst
.cond
!= COND_ALWAYS
,
11071 _("instruction is always unconditional"));
11072 if (inst
.operands
[0].present
)
11074 constraint (inst
.operands
[0].imm
> range
,
11075 _("immediate value out of range"));
11076 inst
.instruction
|= inst
.operands
[0].imm
;
11079 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Thumb HLT: immediate limited to 6 bits (0-63).  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
/* Thumb BKPT: immediate limited to 8 bits (0-255).  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11095 do_t_branch23 (void)
11097 set_it_insn_type_last ();
11098 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11100 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11101 this file. We used to simply ignore the PLT reloc type here --
11102 the branch encoding is now needed to deal with TLSCALL relocs.
11103 So if we see a PLT reloc now, put it back to how it used to be to
11104 keep the preexisting behaviour. */
11105 if (inst
.reloc
.type
== BFD_RELOC_ARM_PLT32
)
11106 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11108 #if defined(OBJ_COFF)
11109 /* If the destination of the branch is a defined symbol which does not have
11110 the THUMB_FUNC attribute, then we must be calling a function which has
11111 the (interfacearm) attribute. We look for the Thumb entry point to that
11112 function and change the branch to refer to that function instead. */
11113 if ( inst
.reloc
.exp
.X_op
== O_symbol
11114 && inst
.reloc
.exp
.X_add_symbol
!= NULL
11115 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
11116 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
11117 inst
.reloc
.exp
.X_add_symbol
=
11118 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
11125 set_it_insn_type_last ();
11126 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11127 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11128 should cause the alignment to be checked once it is known. This is
11129 because BX PC only works if the instruction is word aligned. */
11137 set_it_insn_type_last ();
11138 Rm
= inst
.operands
[0].reg
;
11139 reject_bad_reg (Rm
);
11140 inst
.instruction
|= Rm
<< 16;
11149 Rd
= inst
.operands
[0].reg
;
11150 Rm
= inst
.operands
[1].reg
;
11152 reject_bad_reg (Rd
);
11153 reject_bad_reg (Rm
);
11155 inst
.instruction
|= Rd
<< 8;
11156 inst
.instruction
|= Rm
<< 16;
11157 inst
.instruction
|= Rm
;
11163 set_it_insn_type (OUTSIDE_IT_INSN
);
11164 inst
.instruction
|= inst
.operands
[0].imm
;
11170 set_it_insn_type (OUTSIDE_IT_INSN
);
11172 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11173 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11175 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11176 inst
.instruction
= 0xf3af8000;
11177 inst
.instruction
|= imod
<< 9;
11178 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11179 if (inst
.operands
[1].present
)
11180 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11184 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11185 && (inst
.operands
[0].imm
& 4),
11186 _("selected processor does not support 'A' form "
11187 "of this instruction"));
11188 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11189 _("Thumb does not support the 2-argument "
11190 "form of this instruction"));
11191 inst
.instruction
|= inst
.operands
[0].imm
;
11195 /* THUMB CPY instruction (argument parse). */
11200 if (inst
.size_req
== 4)
11202 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11203 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11204 inst
.instruction
|= inst
.operands
[1].reg
;
11208 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11209 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11210 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11217 set_it_insn_type (OUTSIDE_IT_INSN
);
11218 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11219 inst
.instruction
|= inst
.operands
[0].reg
;
11220 inst
.reloc
.pc_rel
= 1;
11221 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11227 inst
.instruction
|= inst
.operands
[0].imm
;
11233 unsigned Rd
, Rn
, Rm
;
11235 Rd
= inst
.operands
[0].reg
;
11236 Rn
= (inst
.operands
[1].present
11237 ? inst
.operands
[1].reg
: Rd
);
11238 Rm
= inst
.operands
[2].reg
;
11240 reject_bad_reg (Rd
);
11241 reject_bad_reg (Rn
);
11242 reject_bad_reg (Rm
);
11244 inst
.instruction
|= Rd
<< 8;
11245 inst
.instruction
|= Rn
<< 16;
11246 inst
.instruction
|= Rm
;
11252 if (unified_syntax
&& inst
.size_req
== 4)
11253 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11255 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11261 unsigned int cond
= inst
.operands
[0].imm
;
11263 set_it_insn_type (IT_INSN
);
11264 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11266 now_it
.warn_deprecated
= FALSE
;
11268 /* If the condition is a negative condition, invert the mask. */
11269 if ((cond
& 0x1) == 0x0)
11271 unsigned int mask
= inst
.instruction
& 0x000f;
11273 if ((mask
& 0x7) == 0)
11275 /* No conversion needed. */
11276 now_it
.block_length
= 1;
11278 else if ((mask
& 0x3) == 0)
11281 now_it
.block_length
= 2;
11283 else if ((mask
& 0x1) == 0)
11286 now_it
.block_length
= 3;
11291 now_it
.block_length
= 4;
11294 inst
.instruction
&= 0xfff0;
11295 inst
.instruction
|= mask
;
11298 inst
.instruction
|= cond
<< 4;
11301 /* Helper function used for both push/pop and ldm/stm. */
11303 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
11307 load
= (inst
.instruction
& (1 << 20)) != 0;
11309 if (mask
& (1 << 13))
11310 inst
.error
= _("SP not allowed in register list");
11312 if ((mask
& (1 << base
)) != 0
11314 inst
.error
= _("having the base register in the register list when "
11315 "using write back is UNPREDICTABLE");
11319 if (mask
& (1 << 15))
11321 if (mask
& (1 << 14))
11322 inst
.error
= _("LR and PC should not both be in register list");
11324 set_it_insn_type_last ();
11329 if (mask
& (1 << 15))
11330 inst
.error
= _("PC not allowed in register list");
11333 if ((mask
& (mask
- 1)) == 0)
11335 /* Single register transfers implemented as str/ldr. */
11338 if (inst
.instruction
& (1 << 23))
11339 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11341 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11345 if (inst
.instruction
& (1 << 23))
11346 inst
.instruction
= 0x00800000; /* ia -> [base] */
11348 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11351 inst
.instruction
|= 0xf8400000;
11353 inst
.instruction
|= 0x00100000;
11355 mask
= ffs (mask
) - 1;
11358 else if (writeback
)
11359 inst
.instruction
|= WRITE_BACK
;
11361 inst
.instruction
|= mask
;
11362 inst
.instruction
|= base
<< 16;
11368 /* This really doesn't seem worth it. */
11369 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
11370 _("expression too complex"));
11371 constraint (inst
.operands
[1].writeback
,
11372 _("Thumb load/store multiple does not support {reglist}^"));
11374 if (unified_syntax
)
11376 bfd_boolean narrow
;
11380 /* See if we can use a 16-bit instruction. */
11381 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11382 && inst
.size_req
!= 4
11383 && !(inst
.operands
[1].imm
& ~0xff))
11385 mask
= 1 << inst
.operands
[0].reg
;
11387 if (inst
.operands
[0].reg
<= 7)
11389 if (inst
.instruction
== T_MNEM_stmia
11390 ? inst
.operands
[0].writeback
11391 : (inst
.operands
[0].writeback
11392 == !(inst
.operands
[1].imm
& mask
)))
11394 if (inst
.instruction
== T_MNEM_stmia
11395 && (inst
.operands
[1].imm
& mask
)
11396 && (inst
.operands
[1].imm
& (mask
- 1)))
11397 as_warn (_("value stored for r%d is UNKNOWN"),
11398 inst
.operands
[0].reg
);
11400 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11401 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11402 inst
.instruction
|= inst
.operands
[1].imm
;
11405 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11407 /* This means 1 register in reg list one of 3 situations:
11408 1. Instruction is stmia, but without writeback.
11409 2. lmdia without writeback, but with Rn not in
11411 3. ldmia with writeback, but with Rn in reglist.
11412 Case 3 is UNPREDICTABLE behaviour, so we handle
11413 case 1 and 2 which can be converted into a 16-bit
11414 str or ldr. The SP cases are handled below. */
11415 unsigned long opcode
;
11416 /* First, record an error for Case 3. */
11417 if (inst
.operands
[1].imm
& mask
11418 && inst
.operands
[0].writeback
)
11420 _("having the base register in the register list when "
11421 "using write back is UNPREDICTABLE");
11423 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11425 inst
.instruction
= THUMB_OP16 (opcode
);
11426 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11427 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11431 else if (inst
.operands
[0] .reg
== REG_SP
)
11433 if (inst
.operands
[0].writeback
)
11436 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11437 ? T_MNEM_push
: T_MNEM_pop
);
11438 inst
.instruction
|= inst
.operands
[1].imm
;
11441 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11444 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11445 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11446 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11454 if (inst
.instruction
< 0xffff)
11455 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11457 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
11458 inst
.operands
[0].writeback
);
11463 constraint (inst
.operands
[0].reg
> 7
11464 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11465 constraint (inst
.instruction
!= T_MNEM_ldmia
11466 && inst
.instruction
!= T_MNEM_stmia
,
11467 _("Thumb-2 instruction only valid in unified syntax"));
11468 if (inst
.instruction
== T_MNEM_stmia
)
11470 if (!inst
.operands
[0].writeback
)
11471 as_warn (_("this instruction will write back the base register"));
11472 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11473 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11474 as_warn (_("value stored for r%d is UNKNOWN"),
11475 inst
.operands
[0].reg
);
11479 if (!inst
.operands
[0].writeback
11480 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11481 as_warn (_("this instruction will write back the base register"));
11482 else if (inst
.operands
[0].writeback
11483 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11484 as_warn (_("this instruction will not write back the base register"));
11487 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11488 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11489 inst
.instruction
|= inst
.operands
[1].imm
;
11496 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11497 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11498 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11499 || inst
.operands
[1].negative
,
11502 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11504 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11505 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11506 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11512 if (!inst
.operands
[1].present
)
11514 constraint (inst
.operands
[0].reg
== REG_LR
,
11515 _("r14 not allowed as first register "
11516 "when second register is omitted"));
11517 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11519 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11522 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11523 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11524 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11530 unsigned long opcode
;
11533 if (inst
.operands
[0].isreg
11534 && !inst
.operands
[0].preind
11535 && inst
.operands
[0].reg
== REG_PC
)
11536 set_it_insn_type_last ();
11538 opcode
= inst
.instruction
;
11539 if (unified_syntax
)
11541 if (!inst
.operands
[1].isreg
)
11543 if (opcode
<= 0xffff)
11544 inst
.instruction
= THUMB_OP32 (opcode
);
11545 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11548 if (inst
.operands
[1].isreg
11549 && !inst
.operands
[1].writeback
11550 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11551 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11552 && opcode
<= 0xffff
11553 && inst
.size_req
!= 4)
11555 /* Insn may have a 16-bit form. */
11556 Rn
= inst
.operands
[1].reg
;
11557 if (inst
.operands
[1].immisreg
)
11559 inst
.instruction
= THUMB_OP16 (opcode
);
11561 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11563 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11564 reject_bad_reg (inst
.operands
[1].imm
);
11566 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11567 && opcode
!= T_MNEM_ldrsb
)
11568 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11569 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11576 if (inst
.reloc
.pc_rel
)
11577 opcode
= T_MNEM_ldr_pc2
;
11579 opcode
= T_MNEM_ldr_pc
;
11583 if (opcode
== T_MNEM_ldr
)
11584 opcode
= T_MNEM_ldr_sp
;
11586 opcode
= T_MNEM_str_sp
;
11588 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11592 inst
.instruction
= inst
.operands
[0].reg
;
11593 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11595 inst
.instruction
|= THUMB_OP16 (opcode
);
11596 if (inst
.size_req
== 2)
11597 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11599 inst
.relax
= opcode
;
11603 /* Definitely a 32-bit variant. */
11605 /* Warning for Erratum 752419. */
11606 if (opcode
== T_MNEM_ldr
11607 && inst
.operands
[0].reg
== REG_SP
11608 && inst
.operands
[1].writeback
== 1
11609 && !inst
.operands
[1].immisreg
)
11611 if (no_cpu_selected ()
11612 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11613 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11614 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11615 as_warn (_("This instruction may be unpredictable "
11616 "if executed on M-profile cores "
11617 "with interrupts enabled."));
11620 /* Do some validations regarding addressing modes. */
11621 if (inst
.operands
[1].immisreg
)
11622 reject_bad_reg (inst
.operands
[1].imm
);
11624 constraint (inst
.operands
[1].writeback
== 1
11625 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11628 inst
.instruction
= THUMB_OP32 (opcode
);
11629 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11630 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11631 check_ldr_r15_aligned ();
11635 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11637 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11639 /* Only [Rn,Rm] is acceptable. */
11640 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11641 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11642 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11643 || inst
.operands
[1].negative
,
11644 _("Thumb does not support this addressing mode"));
11645 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11649 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11650 if (!inst
.operands
[1].isreg
)
11651 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11654 constraint (!inst
.operands
[1].preind
11655 || inst
.operands
[1].shifted
11656 || inst
.operands
[1].writeback
,
11657 _("Thumb does not support this addressing mode"));
11658 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
11660 constraint (inst
.instruction
& 0x0600,
11661 _("byte or halfword not valid for base register"));
11662 constraint (inst
.operands
[1].reg
== REG_PC
11663 && !(inst
.instruction
& THUMB_LOAD_BIT
),
11664 _("r15 based store not allowed"));
11665 constraint (inst
.operands
[1].immisreg
,
11666 _("invalid base register for register offset"));
11668 if (inst
.operands
[1].reg
== REG_PC
)
11669 inst
.instruction
= T_OPCODE_LDR_PC
;
11670 else if (inst
.instruction
& THUMB_LOAD_BIT
)
11671 inst
.instruction
= T_OPCODE_LDR_SP
;
11673 inst
.instruction
= T_OPCODE_STR_SP
;
11675 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11676 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11680 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
11681 if (!inst
.operands
[1].immisreg
)
11683 /* Immediate offset. */
11684 inst
.instruction
|= inst
.operands
[0].reg
;
11685 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11686 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11690 /* Register offset. */
11691 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
11692 constraint (inst
.operands
[1].negative
,
11693 _("Thumb does not support this addressing mode"));
11696 switch (inst
.instruction
)
11698 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
11699 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
11700 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
11701 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
11702 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
11703 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
11704 case 0x5600 /* ldrsb */:
11705 case 0x5e00 /* ldrsh */: break;
11709 inst
.instruction
|= inst
.operands
[0].reg
;
11710 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11711 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
11717 if (!inst
.operands
[1].present
)
11719 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11720 constraint (inst
.operands
[0].reg
== REG_LR
,
11721 _("r14 not allowed here"));
11722 constraint (inst
.operands
[0].reg
== REG_R12
,
11723 _("r12 not allowed here"));
11726 if (inst
.operands
[2].writeback
11727 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
11728 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
11729 as_warn (_("base register written back, and overlaps "
11730 "one of transfer registers"));
11732 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11733 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11734 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
11740 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11741 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
11747 unsigned Rd
, Rn
, Rm
, Ra
;
11749 Rd
= inst
.operands
[0].reg
;
11750 Rn
= inst
.operands
[1].reg
;
11751 Rm
= inst
.operands
[2].reg
;
11752 Ra
= inst
.operands
[3].reg
;
11754 reject_bad_reg (Rd
);
11755 reject_bad_reg (Rn
);
11756 reject_bad_reg (Rm
);
11757 reject_bad_reg (Ra
);
11759 inst
.instruction
|= Rd
<< 8;
11760 inst
.instruction
|= Rn
<< 16;
11761 inst
.instruction
|= Rm
;
11762 inst
.instruction
|= Ra
<< 12;
11768 unsigned RdLo
, RdHi
, Rn
, Rm
;
11770 RdLo
= inst
.operands
[0].reg
;
11771 RdHi
= inst
.operands
[1].reg
;
11772 Rn
= inst
.operands
[2].reg
;
11773 Rm
= inst
.operands
[3].reg
;
11775 reject_bad_reg (RdLo
);
11776 reject_bad_reg (RdHi
);
11777 reject_bad_reg (Rn
);
11778 reject_bad_reg (Rm
);
11780 inst
.instruction
|= RdLo
<< 12;
11781 inst
.instruction
|= RdHi
<< 8;
11782 inst
.instruction
|= Rn
<< 16;
11783 inst
.instruction
|= Rm
;
11787 do_t_mov_cmp (void)
11791 Rn
= inst
.operands
[0].reg
;
11792 Rm
= inst
.operands
[1].reg
;
11795 set_it_insn_type_last ();
11797 if (unified_syntax
)
11799 int r0off
= (inst
.instruction
== T_MNEM_mov
11800 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
11801 unsigned long opcode
;
11802 bfd_boolean narrow
;
11803 bfd_boolean low_regs
;
11805 low_regs
= (Rn
<= 7 && Rm
<= 7);
11806 opcode
= inst
.instruction
;
11807 if (in_it_block ())
11808 narrow
= opcode
!= T_MNEM_movs
;
11810 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
11811 if (inst
.size_req
== 4
11812 || inst
.operands
[1].shifted
)
11815 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11816 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
11817 && !inst
.operands
[1].shifted
11821 inst
.instruction
= T2_SUBS_PC_LR
;
11825 if (opcode
== T_MNEM_cmp
)
11827 constraint (Rn
== REG_PC
, BAD_PC
);
11830 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11832 warn_deprecated_sp (Rm
);
11833 /* R15 was documented as a valid choice for Rm in ARMv6,
11834 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11835 tools reject R15, so we do too. */
11836 constraint (Rm
== REG_PC
, BAD_PC
);
11839 reject_bad_reg (Rm
);
11841 else if (opcode
== T_MNEM_mov
11842 || opcode
== T_MNEM_movs
)
11844 if (inst
.operands
[1].isreg
)
11846 if (opcode
== T_MNEM_movs
)
11848 reject_bad_reg (Rn
);
11849 reject_bad_reg (Rm
);
11853 /* This is mov.n. */
11854 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
11855 && (Rm
== REG_SP
|| Rm
== REG_PC
))
11857 as_tsktsk (_("Use of r%u as a source register is "
11858 "deprecated when r%u is the destination "
11859 "register."), Rm
, Rn
);
11864 /* This is mov.w. */
11865 constraint (Rn
== REG_PC
, BAD_PC
);
11866 constraint (Rm
== REG_PC
, BAD_PC
);
11867 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
11871 reject_bad_reg (Rn
);
11874 if (!inst
.operands
[1].isreg
)
11876 /* Immediate operand. */
11877 if (!in_it_block () && opcode
== T_MNEM_mov
)
11879 if (low_regs
&& narrow
)
11881 inst
.instruction
= THUMB_OP16 (opcode
);
11882 inst
.instruction
|= Rn
<< 8;
11883 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11884 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
11886 if (inst
.size_req
== 2)
11887 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11889 inst
.relax
= opcode
;
11894 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11895 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
11896 THUMB1_RELOC_ONLY
);
11898 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11899 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11900 inst
.instruction
|= Rn
<< r0off
;
11901 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11904 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
11905 && (inst
.instruction
== T_MNEM_mov
11906 || inst
.instruction
== T_MNEM_movs
))
11908 /* Register shifts are encoded as separate shift instructions. */
11909 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
11911 if (in_it_block ())
11916 if (inst
.size_req
== 4)
11919 if (!low_regs
|| inst
.operands
[1].imm
> 7)
11925 switch (inst
.operands
[1].shift_kind
)
11928 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
11931 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
11934 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
11937 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
11943 inst
.instruction
= opcode
;
11946 inst
.instruction
|= Rn
;
11947 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
11952 inst
.instruction
|= CONDS_BIT
;
11954 inst
.instruction
|= Rn
<< 8;
11955 inst
.instruction
|= Rm
<< 16;
11956 inst
.instruction
|= inst
.operands
[1].imm
;
11961 /* Some mov with immediate shift have narrow variants.
11962 Register shifts are handled above. */
11963 if (low_regs
&& inst
.operands
[1].shifted
11964 && (inst
.instruction
== T_MNEM_mov
11965 || inst
.instruction
== T_MNEM_movs
))
11967 if (in_it_block ())
11968 narrow
= (inst
.instruction
== T_MNEM_mov
);
11970 narrow
= (inst
.instruction
== T_MNEM_movs
);
11975 switch (inst
.operands
[1].shift_kind
)
11977 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11978 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11979 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11980 default: narrow
= FALSE
; break;
11986 inst
.instruction
|= Rn
;
11987 inst
.instruction
|= Rm
<< 3;
11988 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11992 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11993 inst
.instruction
|= Rn
<< r0off
;
11994 encode_thumb32_shifted_operand (1);
11998 switch (inst
.instruction
)
12001 /* In v4t or v5t a move of two lowregs produces unpredictable
12002 results. Don't allow this. */
12005 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12006 "MOV Rd, Rs with two low registers is not "
12007 "permitted on this architecture");
12008 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12012 inst
.instruction
= T_OPCODE_MOV_HR
;
12013 inst
.instruction
|= (Rn
& 0x8) << 4;
12014 inst
.instruction
|= (Rn
& 0x7);
12015 inst
.instruction
|= Rm
<< 3;
12019 /* We know we have low registers at this point.
12020 Generate LSLS Rd, Rs, #0. */
12021 inst
.instruction
= T_OPCODE_LSL_I
;
12022 inst
.instruction
|= Rn
;
12023 inst
.instruction
|= Rm
<< 3;
12029 inst
.instruction
= T_OPCODE_CMP_LR
;
12030 inst
.instruction
|= Rn
;
12031 inst
.instruction
|= Rm
<< 3;
12035 inst
.instruction
= T_OPCODE_CMP_HR
;
12036 inst
.instruction
|= (Rn
& 0x8) << 4;
12037 inst
.instruction
|= (Rn
& 0x7);
12038 inst
.instruction
|= Rm
<< 3;
12045 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12047 /* PR 10443: Do not silently ignore shifted operands. */
12048 constraint (inst
.operands
[1].shifted
,
12049 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12051 if (inst
.operands
[1].isreg
)
12053 if (Rn
< 8 && Rm
< 8)
12055 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12056 since a MOV instruction produces unpredictable results. */
12057 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12058 inst
.instruction
= T_OPCODE_ADD_I3
;
12060 inst
.instruction
= T_OPCODE_CMP_LR
;
12062 inst
.instruction
|= Rn
;
12063 inst
.instruction
|= Rm
<< 3;
12067 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12068 inst
.instruction
= T_OPCODE_MOV_HR
;
12070 inst
.instruction
= T_OPCODE_CMP_HR
;
12076 constraint (Rn
> 7,
12077 _("only lo regs allowed with immediate"));
12078 inst
.instruction
|= Rn
<< 8;
12079 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
12090 top
= (inst
.instruction
& 0x00800000) != 0;
12091 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
12093 constraint (top
, _(":lower16: not allowed this instruction"));
12094 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
12096 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
12098 constraint (!top
, _(":upper16: not allowed this instruction"));
12099 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
12102 Rd
= inst
.operands
[0].reg
;
12103 reject_bad_reg (Rd
);
12105 inst
.instruction
|= Rd
<< 8;
12106 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
12108 imm
= inst
.reloc
.exp
.X_add_number
;
12109 inst
.instruction
|= (imm
& 0xf000) << 4;
12110 inst
.instruction
|= (imm
& 0x0800) << 15;
12111 inst
.instruction
|= (imm
& 0x0700) << 4;
12112 inst
.instruction
|= (imm
& 0x00ff);
12117 do_t_mvn_tst (void)
12121 Rn
= inst
.operands
[0].reg
;
12122 Rm
= inst
.operands
[1].reg
;
12124 if (inst
.instruction
== T_MNEM_cmp
12125 || inst
.instruction
== T_MNEM_cmn
)
12126 constraint (Rn
== REG_PC
, BAD_PC
);
12128 reject_bad_reg (Rn
);
12129 reject_bad_reg (Rm
);
12131 if (unified_syntax
)
12133 int r0off
= (inst
.instruction
== T_MNEM_mvn
12134 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12135 bfd_boolean narrow
;
12137 if (inst
.size_req
== 4
12138 || inst
.instruction
> 0xffff
12139 || inst
.operands
[1].shifted
12140 || Rn
> 7 || Rm
> 7)
12142 else if (inst
.instruction
== T_MNEM_cmn
12143 || inst
.instruction
== T_MNEM_tst
)
12145 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12146 narrow
= !in_it_block ();
12148 narrow
= in_it_block ();
12150 if (!inst
.operands
[1].isreg
)
12152 /* For an immediate, we always generate a 32-bit opcode;
12153 section relaxation will shrink it later if possible. */
12154 if (inst
.instruction
< 0xffff)
12155 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12156 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12157 inst
.instruction
|= Rn
<< r0off
;
12158 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12162 /* See if we can do this with a 16-bit instruction. */
12165 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12166 inst
.instruction
|= Rn
;
12167 inst
.instruction
|= Rm
<< 3;
12171 constraint (inst
.operands
[1].shifted
12172 && inst
.operands
[1].immisreg
,
12173 _("shift must be constant"));
12174 if (inst
.instruction
< 0xffff)
12175 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12176 inst
.instruction
|= Rn
<< r0off
;
12177 encode_thumb32_shifted_operand (1);
12183 constraint (inst
.instruction
> 0xffff
12184 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12185 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12186 _("unshifted register required"));
12187 constraint (Rn
> 7 || Rm
> 7,
12190 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12191 inst
.instruction
|= Rn
;
12192 inst
.instruction
|= Rm
<< 3;
12201 if (do_vfp_nsyn_mrs () == SUCCESS
)
12204 Rd
= inst
.operands
[0].reg
;
12205 reject_bad_reg (Rd
);
12206 inst
.instruction
|= Rd
<< 8;
12208 if (inst
.operands
[1].isreg
)
12210 unsigned br
= inst
.operands
[1].reg
;
12211 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12212 as_bad (_("bad register for mrs"));
12214 inst
.instruction
|= br
& (0xf << 16);
12215 inst
.instruction
|= (br
& 0x300) >> 4;
12216 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12220 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12222 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12224 /* PR gas/12698: The constraint is only applied for m_profile.
12225 If the user has specified -march=all, we want to ignore it as
12226 we are building for any CPU type, including non-m variants. */
12227 bfd_boolean m_profile
=
12228 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12229 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12230 "not support requested special purpose register"));
12233 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12235 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12236 _("'APSR', 'CPSR' or 'SPSR' expected"));
12238 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12239 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12240 inst
.instruction
|= 0xf0000;
12250 if (do_vfp_nsyn_msr () == SUCCESS
)
12253 constraint (!inst
.operands
[1].isreg
,
12254 _("Thumb encoding does not support an immediate here"));
12256 if (inst
.operands
[0].isreg
)
12257 flags
= (int)(inst
.operands
[0].reg
);
12259 flags
= inst
.operands
[0].imm
;
12261 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12263 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12265 /* PR gas/12698: The constraint is only applied for m_profile.
12266 If the user has specified -march=all, we want to ignore it as
12267 we are building for any CPU type, including non-m variants. */
12268 bfd_boolean m_profile
=
12269 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12270 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12271 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12272 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12273 && bits
!= PSR_f
)) && m_profile
,
12274 _("selected processor does not support requested special "
12275 "purpose register"));
12278 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12279 "requested special purpose register"));
12281 Rn
= inst
.operands
[1].reg
;
12282 reject_bad_reg (Rn
);
12284 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12285 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12286 inst
.instruction
|= (flags
& 0x300) >> 4;
12287 inst
.instruction
|= (flags
& 0xff);
12288 inst
.instruction
|= Rn
<< 16;
12294 bfd_boolean narrow
;
12295 unsigned Rd
, Rn
, Rm
;
12297 if (!inst
.operands
[2].present
)
12298 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12300 Rd
= inst
.operands
[0].reg
;
12301 Rn
= inst
.operands
[1].reg
;
12302 Rm
= inst
.operands
[2].reg
;
12304 if (unified_syntax
)
12306 if (inst
.size_req
== 4
12312 else if (inst
.instruction
== T_MNEM_muls
)
12313 narrow
= !in_it_block ();
12315 narrow
= in_it_block ();
12319 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12320 constraint (Rn
> 7 || Rm
> 7,
12327 /* 16-bit MULS/Conditional MUL. */
12328 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12329 inst
.instruction
|= Rd
;
12332 inst
.instruction
|= Rm
<< 3;
12334 inst
.instruction
|= Rn
<< 3;
12336 constraint (1, _("dest must overlap one source register"));
12340 constraint (inst
.instruction
!= T_MNEM_mul
,
12341 _("Thumb-2 MUL must not set flags"));
12343 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12344 inst
.instruction
|= Rd
<< 8;
12345 inst
.instruction
|= Rn
<< 16;
12346 inst
.instruction
|= Rm
<< 0;
12348 reject_bad_reg (Rd
);
12349 reject_bad_reg (Rn
);
12350 reject_bad_reg (Rm
);
12357 unsigned RdLo
, RdHi
, Rn
, Rm
;
12359 RdLo
= inst
.operands
[0].reg
;
12360 RdHi
= inst
.operands
[1].reg
;
12361 Rn
= inst
.operands
[2].reg
;
12362 Rm
= inst
.operands
[3].reg
;
12364 reject_bad_reg (RdLo
);
12365 reject_bad_reg (RdHi
);
12366 reject_bad_reg (Rn
);
12367 reject_bad_reg (Rm
);
12369 inst
.instruction
|= RdLo
<< 12;
12370 inst
.instruction
|= RdHi
<< 8;
12371 inst
.instruction
|= Rn
<< 16;
12372 inst
.instruction
|= Rm
;
12375 as_tsktsk (_("rdhi and rdlo must be different"));
12381 set_it_insn_type (NEUTRAL_IT_INSN
);
12383 if (unified_syntax
)
12385 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12387 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12388 inst
.instruction
|= inst
.operands
[0].imm
;
12392 /* PR9722: Check for Thumb2 availability before
12393 generating a thumb2 nop instruction. */
12394 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12396 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12397 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12400 inst
.instruction
= 0x46c0;
12405 constraint (inst
.operands
[0].present
,
12406 _("Thumb does not support NOP with hints"));
12407 inst
.instruction
= 0x46c0;
12414 if (unified_syntax
)
12416 bfd_boolean narrow
;
12418 if (THUMB_SETS_FLAGS (inst
.instruction
))
12419 narrow
= !in_it_block ();
12421 narrow
= in_it_block ();
12422 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12424 if (inst
.size_req
== 4)
12429 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12430 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12431 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12435 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12436 inst
.instruction
|= inst
.operands
[0].reg
;
12437 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12442 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12444 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12446 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12447 inst
.instruction
|= inst
.operands
[0].reg
;
12448 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12457 Rd
= inst
.operands
[0].reg
;
12458 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12460 reject_bad_reg (Rd
);
12461 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12462 reject_bad_reg (Rn
);
12464 inst
.instruction
|= Rd
<< 8;
12465 inst
.instruction
|= Rn
<< 16;
12467 if (!inst
.operands
[2].isreg
)
12469 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12470 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12476 Rm
= inst
.operands
[2].reg
;
12477 reject_bad_reg (Rm
);
12479 constraint (inst
.operands
[2].shifted
12480 && inst
.operands
[2].immisreg
,
12481 _("shift must be constant"));
12482 encode_thumb32_shifted_operand (2);
12489 unsigned Rd
, Rn
, Rm
;
12491 Rd
= inst
.operands
[0].reg
;
12492 Rn
= inst
.operands
[1].reg
;
12493 Rm
= inst
.operands
[2].reg
;
12495 reject_bad_reg (Rd
);
12496 reject_bad_reg (Rn
);
12497 reject_bad_reg (Rm
);
12499 inst
.instruction
|= Rd
<< 8;
12500 inst
.instruction
|= Rn
<< 16;
12501 inst
.instruction
|= Rm
;
12502 if (inst
.operands
[3].present
)
12504 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
12505 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12506 _("expression too complex"));
12507 inst
.instruction
|= (val
& 0x1c) << 10;
12508 inst
.instruction
|= (val
& 0x03) << 6;
12515 if (!inst
.operands
[3].present
)
12519 inst
.instruction
&= ~0x00000020;
12521 /* PR 10168. Swap the Rm and Rn registers. */
12522 Rtmp
= inst
.operands
[1].reg
;
12523 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12524 inst
.operands
[2].reg
= Rtmp
;
12532 if (inst
.operands
[0].immisreg
)
12533 reject_bad_reg (inst
.operands
[0].imm
);
12535 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12539 do_t_push_pop (void)
12543 constraint (inst
.operands
[0].writeback
,
12544 _("push/pop do not support {reglist}^"));
12545 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
12546 _("expression too complex"));
12548 mask
= inst
.operands
[0].imm
;
12549 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12550 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12551 else if (inst
.size_req
!= 4
12552 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
12553 ? REG_LR
: REG_PC
)))
12555 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12556 inst
.instruction
|= THUMB_PP_PC_LR
;
12557 inst
.instruction
|= mask
& 0xff;
12559 else if (unified_syntax
)
12561 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12562 encode_thumb2_ldmstm (13, mask
, TRUE
);
12566 inst
.error
= _("invalid register list to push/pop instruction");
12576 Rd
= inst
.operands
[0].reg
;
12577 Rm
= inst
.operands
[1].reg
;
12579 reject_bad_reg (Rd
);
12580 reject_bad_reg (Rm
);
12582 inst
.instruction
|= Rd
<< 8;
12583 inst
.instruction
|= Rm
<< 16;
12584 inst
.instruction
|= Rm
;
12592 Rd
= inst
.operands
[0].reg
;
12593 Rm
= inst
.operands
[1].reg
;
12595 reject_bad_reg (Rd
);
12596 reject_bad_reg (Rm
);
12598 if (Rd
<= 7 && Rm
<= 7
12599 && inst
.size_req
!= 4)
12601 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12602 inst
.instruction
|= Rd
;
12603 inst
.instruction
|= Rm
<< 3;
12605 else if (unified_syntax
)
12607 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12608 inst
.instruction
|= Rd
<< 8;
12609 inst
.instruction
|= Rm
<< 16;
12610 inst
.instruction
|= Rm
;
12613 inst
.error
= BAD_HIREG
;
12621 Rd
= inst
.operands
[0].reg
;
12622 Rm
= inst
.operands
[1].reg
;
12624 reject_bad_reg (Rd
);
12625 reject_bad_reg (Rm
);
12627 inst
.instruction
|= Rd
<< 8;
12628 inst
.instruction
|= Rm
;
12636 Rd
= inst
.operands
[0].reg
;
12637 Rs
= (inst
.operands
[1].present
12638 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12639 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12641 reject_bad_reg (Rd
);
12642 reject_bad_reg (Rs
);
12643 if (inst
.operands
[2].isreg
)
12644 reject_bad_reg (inst
.operands
[2].reg
);
12646 inst
.instruction
|= Rd
<< 8;
12647 inst
.instruction
|= Rs
<< 16;
12648 if (!inst
.operands
[2].isreg
)
12650 bfd_boolean narrow
;
12652 if ((inst
.instruction
& 0x00100000) != 0)
12653 narrow
= !in_it_block ();
12655 narrow
= in_it_block ();
12657 if (Rd
> 7 || Rs
> 7)
12660 if (inst
.size_req
== 4 || !unified_syntax
)
12663 if (inst
.reloc
.exp
.X_op
!= O_constant
12664 || inst
.reloc
.exp
.X_add_number
!= 0)
12667 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12668 relaxation, but it doesn't seem worth the hassle. */
12671 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12672 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12673 inst
.instruction
|= Rs
<< 3;
12674 inst
.instruction
|= Rd
;
12678 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12679 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12683 encode_thumb32_shifted_operand (2);
12689 if (warn_on_deprecated
12690 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12691 as_tsktsk (_("setend use is deprecated for ARMv8"));
12693 set_it_insn_type (OUTSIDE_IT_INSN
);
12694 if (inst
.operands
[0].imm
)
12695 inst
.instruction
|= 0x8;
12701 if (!inst
.operands
[1].present
)
12702 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12704 if (unified_syntax
)
12706 bfd_boolean narrow
;
12709 switch (inst
.instruction
)
12712 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12714 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12716 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12718 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12722 if (THUMB_SETS_FLAGS (inst
.instruction
))
12723 narrow
= !in_it_block ();
12725 narrow
= in_it_block ();
12726 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12728 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12730 if (inst
.operands
[2].isreg
12731 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12732 || inst
.operands
[2].reg
> 7))
12734 if (inst
.size_req
== 4)
12737 reject_bad_reg (inst
.operands
[0].reg
);
12738 reject_bad_reg (inst
.operands
[1].reg
);
12742 if (inst
.operands
[2].isreg
)
12744 reject_bad_reg (inst
.operands
[2].reg
);
12745 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12746 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12747 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12748 inst
.instruction
|= inst
.operands
[2].reg
;
12750 /* PR 12854: Error on extraneous shifts. */
12751 constraint (inst
.operands
[2].shifted
,
12752 _("extraneous shift as part of operand to shift insn"));
12756 inst
.operands
[1].shifted
= 1;
12757 inst
.operands
[1].shift_kind
= shift_kind
;
12758 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
12759 ? T_MNEM_movs
: T_MNEM_mov
);
12760 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12761 encode_thumb32_shifted_operand (1);
12762 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12763 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12768 if (inst
.operands
[2].isreg
)
12770 switch (shift_kind
)
12772 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12773 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12774 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12775 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12779 inst
.instruction
|= inst
.operands
[0].reg
;
12780 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12782 /* PR 12854: Error on extraneous shifts. */
12783 constraint (inst
.operands
[2].shifted
,
12784 _("extraneous shift as part of operand to shift insn"));
12788 switch (shift_kind
)
12790 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12791 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12792 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12795 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12796 inst
.instruction
|= inst
.operands
[0].reg
;
12797 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12803 constraint (inst
.operands
[0].reg
> 7
12804 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
12805 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12807 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
12809 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
12810 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12811 _("source1 and dest must be same register"));
12813 switch (inst
.instruction
)
12815 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12816 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12817 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12818 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12822 inst
.instruction
|= inst
.operands
[0].reg
;
12823 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12825 /* PR 12854: Error on extraneous shifts. */
12826 constraint (inst
.operands
[2].shifted
,
12827 _("extraneous shift as part of operand to shift insn"));
12831 switch (inst
.instruction
)
12833 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12834 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12835 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12836 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
12839 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12840 inst
.instruction
|= inst
.operands
[0].reg
;
12841 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12849 unsigned Rd
, Rn
, Rm
;
12851 Rd
= inst
.operands
[0].reg
;
12852 Rn
= inst
.operands
[1].reg
;
12853 Rm
= inst
.operands
[2].reg
;
12855 reject_bad_reg (Rd
);
12856 reject_bad_reg (Rn
);
12857 reject_bad_reg (Rm
);
12859 inst
.instruction
|= Rd
<< 8;
12860 inst
.instruction
|= Rn
<< 16;
12861 inst
.instruction
|= Rm
;
12867 unsigned Rd
, Rn
, Rm
;
12869 Rd
= inst
.operands
[0].reg
;
12870 Rm
= inst
.operands
[1].reg
;
12871 Rn
= inst
.operands
[2].reg
;
12873 reject_bad_reg (Rd
);
12874 reject_bad_reg (Rn
);
12875 reject_bad_reg (Rm
);
12877 inst
.instruction
|= Rd
<< 8;
12878 inst
.instruction
|= Rn
<< 16;
12879 inst
.instruction
|= Rm
;
12885 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12886 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
12887 _("SMC is not permitted on this architecture"));
12888 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12889 _("expression too complex"));
12890 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12891 inst
.instruction
|= (value
& 0xf000) >> 12;
12892 inst
.instruction
|= (value
& 0x0ff0);
12893 inst
.instruction
|= (value
& 0x000f) << 16;
12894 /* PR gas/15623: SMC instructions must be last in an IT block. */
12895 set_it_insn_type_last ();
12901 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12903 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12904 inst
.instruction
|= (value
& 0x0fff);
12905 inst
.instruction
|= (value
& 0xf000) << 4;
12909 do_t_ssat_usat (int bias
)
12913 Rd
= inst
.operands
[0].reg
;
12914 Rn
= inst
.operands
[2].reg
;
12916 reject_bad_reg (Rd
);
12917 reject_bad_reg (Rn
);
12919 inst
.instruction
|= Rd
<< 8;
12920 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
12921 inst
.instruction
|= Rn
<< 16;
12923 if (inst
.operands
[3].present
)
12925 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
12927 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12929 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12930 _("expression too complex"));
12932 if (shift_amount
!= 0)
12934 constraint (shift_amount
> 31,
12935 _("shift expression is too large"));
12937 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
12938 inst
.instruction
|= 0x00200000; /* sh bit. */
12940 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
12941 inst
.instruction
|= (shift_amount
& 0x03) << 6;
12949 do_t_ssat_usat (1);
12957 Rd
= inst
.operands
[0].reg
;
12958 Rn
= inst
.operands
[2].reg
;
12960 reject_bad_reg (Rd
);
12961 reject_bad_reg (Rn
);
12963 inst
.instruction
|= Rd
<< 8;
12964 inst
.instruction
|= inst
.operands
[1].imm
- 1;
12965 inst
.instruction
|= Rn
<< 16;
12971 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
12972 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
12973 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
12974 || inst
.operands
[2].negative
,
12977 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
12979 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12980 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12981 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12982 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
12988 if (!inst
.operands
[2].present
)
12989 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
12991 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
12992 || inst
.operands
[0].reg
== inst
.operands
[2].reg
12993 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
12996 inst
.instruction
|= inst
.operands
[0].reg
;
12997 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12998 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
12999 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13005 unsigned Rd
, Rn
, Rm
;
13007 Rd
= inst
.operands
[0].reg
;
13008 Rn
= inst
.operands
[1].reg
;
13009 Rm
= inst
.operands
[2].reg
;
13011 reject_bad_reg (Rd
);
13012 reject_bad_reg (Rn
);
13013 reject_bad_reg (Rm
);
13015 inst
.instruction
|= Rd
<< 8;
13016 inst
.instruction
|= Rn
<< 16;
13017 inst
.instruction
|= Rm
;
13018 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13026 Rd
= inst
.operands
[0].reg
;
13027 Rm
= inst
.operands
[1].reg
;
13029 reject_bad_reg (Rd
);
13030 reject_bad_reg (Rm
);
13032 if (inst
.instruction
<= 0xffff
13033 && inst
.size_req
!= 4
13034 && Rd
<= 7 && Rm
<= 7
13035 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13037 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13038 inst
.instruction
|= Rd
;
13039 inst
.instruction
|= Rm
<< 3;
13041 else if (unified_syntax
)
13043 if (inst
.instruction
<= 0xffff)
13044 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13045 inst
.instruction
|= Rd
<< 8;
13046 inst
.instruction
|= Rm
;
13047 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13051 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13052 _("Thumb encoding does not support rotation"));
13053 constraint (1, BAD_HIREG
);
13060 /* We have to do the following check manually as ARM_EXT_OS only applies
13062 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6m
))
13064 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_os
)
13065 /* This only applies to the v6m however, not later architectures. */
13066 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
))
13067 as_bad (_("SVC is not permitted on this architecture"));
13068 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, arm_ext_os
);
13071 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
13080 half
= (inst
.instruction
& 0x10) != 0;
13081 set_it_insn_type_last ();
13082 constraint (inst
.operands
[0].immisreg
,
13083 _("instruction requires register index"));
13085 Rn
= inst
.operands
[0].reg
;
13086 Rm
= inst
.operands
[0].imm
;
13088 constraint (Rn
== REG_SP
, BAD_SP
);
13089 reject_bad_reg (Rm
);
13091 constraint (!half
&& inst
.operands
[0].shifted
,
13092 _("instruction does not allow shifted index"));
13093 inst
.instruction
|= (Rn
<< 16) | Rm
;
13099 if (!inst
.operands
[0].present
)
13100 inst
.operands
[0].imm
= 0;
13102 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13104 constraint (inst
.size_req
== 2,
13105 _("immediate value out of range"));
13106 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13107 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13108 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13112 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13113 inst
.instruction
|= inst
.operands
[0].imm
;
13116 set_it_insn_type (NEUTRAL_IT_INSN
);
13123 do_t_ssat_usat (0);
13131 Rd
= inst
.operands
[0].reg
;
13132 Rn
= inst
.operands
[2].reg
;
13134 reject_bad_reg (Rd
);
13135 reject_bad_reg (Rn
);
13137 inst
.instruction
|= Rd
<< 8;
13138 inst
.instruction
|= inst
.operands
[1].imm
;
13139 inst
.instruction
|= Rn
<< 16;
13142 /* Neon instruction encoder helpers. */
13144 /* Encodings for the different types for various Neon opcodes. */
13146 /* An "invalid" code for the following tables. */
13149 struct neon_tab_entry
13152 unsigned float_or_poly
;
13153 unsigned scalar_or_imm
;
/* Map overloaded Neon opcodes to their respective encodings.  Each X() row
   gives the mnemonic plus up to three base encodings, stored in struct
   neon_tab_entry as (integer, float_or_poly, scalar_or_imm).  N_INV marks a
   form the mnemonic does not have.  (Restored from extraction-corrupted text:
   fused line numbers removed, continuation lines rejoined.)  */
#define NEON_ENC_TAB							\
  X(vabd,	0x0000700, 0x1200d00, N_INV),				\
  X(vmax,	0x0000600, 0x0000f00, N_INV),				\
  X(vmin,	0x0000610, 0x0200f00, N_INV),				\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),				\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),				\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),				\
  X(vadd,	0x0000800, 0x0000d00, N_INV),				\
  X(vsub,	0x1000800, 0x0200d00, N_INV),				\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),			\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),			\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),			\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  			\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),			\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),			\
  X(vfma,	N_INV, 0x0000c10, N_INV),				\
  X(vfms,	N_INV, 0x0200c10, N_INV),				\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),			\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),			\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),			\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),			\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),			\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),			\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),			\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),			\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),			\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),			\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),			\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),			\
  X(vshl,	0x0000400, N_INV,     0x0800510),			\
  X(vqshl,	0x0000410, N_INV,     0x0800710),			\
  X(vand,	0x0000110, N_INV,     0x0800030),			\
  X(vbic,	0x0100110, N_INV,     0x0800030),			\
  X(veor,	0x1000110, N_INV,     N_INV),				\
  X(vorn,	0x0300110, N_INV,     0x0800010),			\
  X(vorr,	0x0200110, N_INV,     0x0800010),			\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),			\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */	\
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),				\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),			\
  X(vst2,	0x0000100, 0x0800100, N_INV),				\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),			\
  X(vst3,	0x0000200, 0x0800200, N_INV),				\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),			\
  X(vst4,	0x0000300, 0x0800300, N_INV),				\
  X(vmovn,	0x1b20200, N_INV,     N_INV),				\
  X(vtrn,	0x1b20080, N_INV,     N_INV),				\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),				\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),				\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),				\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),				\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),				\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),				\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),				\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),				\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),				\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),				\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),				\
  X(vseleq,	0xe000a00, N_INV,     N_INV),				\
  X(vselvs,	0xe100a00, N_INV,     N_INV),				\
  X(vselge,	0xe200a00, N_INV,     N_INV),				\
  X(vselgt,	0xe300a00, N_INV,     N_INV),				\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),				\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),				\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),				\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),				\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),				\
  X(aes,	0x3b00300, N_INV,     N_INV),				\
  X(sha3op,	0x2000c00, N_INV,     N_INV),				\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),				\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13236 #define X(OPC,I,F,S) N_MNEM_##OPC
13241 static const struct neon_tab_entry neon_enc_tab
[] =
13243 #define X(OPC,I,F,S) { (I), (F), (S) }
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each one indexes neon_enc_tab with the low 28 bits of X (which at this
   point holds an N_MNEM_* value) and selects the struct field holding the
   base encoding for that operand form.  SINGLE_/DOUBLE_/FPV8_ additionally
   merge back the high bits of X (condition/size bits) into the result.
   (Restored from extraction-corrupted text.)  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13265 #define NEON_ENCODE(type, inst) \
13268 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13269 inst.is_neon = 1; \
13273 #define check_neon_suffixes \
13276 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13278 as_bad (_("invalid neon suffix for non neon instruction")); \
13284 /* Define shapes for instruction operands. The following mnemonic characters
13285 are used in this table:
13287 F - VFP S<n> register
13288 D - Neon D<n> register
13289 Q - Neon Q<n> register
13293 L - D<n> register list
13295 This table is used to generate various data:
13296 - enumerations of the form NS_DDR to be used as arguments to
13298 - a table classifying shapes into single, double, quad, mixed.
13299 - a table used to drive neon_select_shape. */
/* Table of Neon operand shapes: each X(N, (ops...), CLASS) row gives the
   operand count, the per-operand register kinds, and the shape class
   (SINGLE/DOUBLE/QUAD/MIXED/HALF).  Expanded several times with different
   definitions of X to build the NS_* enum and the classification tables.
   (Restored from extraction-corrupted text.)  */
#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED)
/* Helper macros that turn a NEON_SHAPE_DEF operand list into an NS_* enum
   name by pasting the operand letters together (e.g. S3(D,D,I) -> NS_DDI).
   X dispatches on the operand count N.  (Restored from extraction-corrupted
   text.)  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L
13372 enum neon_shape_class
13381 #define X(N, L, C) SC_##C
13383 static enum neon_shape_class neon_shape_class
[] =
13402 /* Register widths of above. */
13403 static unsigned neon_shape_el_size
[] =
13415 struct neon_shape_info
13418 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
/* Helper macros that turn a NEON_SHAPE_DEF row into a struct neon_shape_info
   initializer: X(N, (ops...), CLASS) -> { N, { SE_op, ... } }.  (Restored
   from extraction-corrupted text.)  */
#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }
13427 static struct neon_shape_info neon_shape_tab
[] =
13437 /* Bit masks used in type checking given instructions.
13438 'N_EQK' means the type must be the same as (or based on in some way) the key
13439 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13440 set, various other bits can be set as well in order to modify the meaning of
13441 the type constraint. */
13443 enum neon_type_mask
13467 N_KEY
= 0x1000000, /* Key element (main type specifier). */
13468 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
13469 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
13470 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
13471 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
13472 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
13473 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13474 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13475 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13476 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
13477 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13479 N_MAX_NONSPECIAL
= N_P64
/* Convenience combinations of the neon_type_mask bits.  N_ALLMODS gathers
   every N_EQK modifier flag; the rest name commonly-used sets of element
   types (signed/unsigned/integer/float at various widths).  (Restored from
   extraction-corrupted text.)  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)
/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13498 /* Select a "shape" for the current instruction (describing register types or
13499 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13500 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13501 function of operand parsing, so this function doesn't need to be called.
13502 Shapes should be listed in order of decreasing length. */
13504 static enum neon_shape
13505 neon_select_shape (enum neon_shape shape
, ...)
13508 enum neon_shape first_shape
= shape
;
13510 /* Fix missing optional operands. FIXME: we don't know at this point how
13511 many arguments we should have, so this makes the assumption that we have
13512 > 1. This is true of all current Neon opcodes, I think, but may not be
13513 true in the future. */
13514 if (!inst
.operands
[1].present
)
13515 inst
.operands
[1] = inst
.operands
[0];
13517 va_start (ap
, shape
);
13519 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13524 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13526 if (!inst
.operands
[j
].present
)
13532 switch (neon_shape_tab
[shape
].el
[j
])
13534 /* If a .f16, .16, .u16, .s16 type specifier is given over
13535 a VFP single precision register operand, it's essentially
13536 means only half of the register is used.
13538 If the type specifier is given after the mnemonics, the
13539 information is stored in inst.vectype. If the type specifier
13540 is given after register operand, the information is stored
13541 in inst.operands[].vectype.
13543 When there is only one type specifier, and all the register
13544 operands are the same type of hardware register, the type
13545 specifier applies to all register operands.
13547 If no type specifier is given, the shape is inferred from
13548 operand information.
13551 vadd.f16 s0, s1, s2: NS_HHH
13552 vabs.f16 s0, s1: NS_HH
13553 vmov.f16 s0, r1: NS_HR
13554 vmov.f16 r0, s1: NS_RH
13555 vcvt.f16 r0, s1: NS_RH
13556 vcvt.f16.s32 s2, s2, #29: NS_HFI
13557 vcvt.f16.s32 s2, s2: NS_HF
13560 if (!(inst
.operands
[j
].isreg
13561 && inst
.operands
[j
].isvec
13562 && inst
.operands
[j
].issingle
13563 && !inst
.operands
[j
].isquad
13564 && ((inst
.vectype
.elems
== 1
13565 && inst
.vectype
.el
[0].size
== 16)
13566 || (inst
.vectype
.elems
> 1
13567 && inst
.vectype
.el
[j
].size
== 16)
13568 || (inst
.vectype
.elems
== 0
13569 && inst
.operands
[j
].vectype
.type
!= NT_invtype
13570 && inst
.operands
[j
].vectype
.size
== 16))))
13575 if (!(inst
.operands
[j
].isreg
13576 && inst
.operands
[j
].isvec
13577 && inst
.operands
[j
].issingle
13578 && !inst
.operands
[j
].isquad
13579 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
13580 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
13581 || (inst
.vectype
.elems
== 0
13582 && (inst
.operands
[j
].vectype
.size
== 32
13583 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
13588 if (!(inst
.operands
[j
].isreg
13589 && inst
.operands
[j
].isvec
13590 && !inst
.operands
[j
].isquad
13591 && !inst
.operands
[j
].issingle
))
13596 if (!(inst
.operands
[j
].isreg
13597 && !inst
.operands
[j
].isvec
))
13602 if (!(inst
.operands
[j
].isreg
13603 && inst
.operands
[j
].isvec
13604 && inst
.operands
[j
].isquad
13605 && !inst
.operands
[j
].issingle
))
13610 if (!(!inst
.operands
[j
].isreg
13611 && !inst
.operands
[j
].isscalar
))
13616 if (!(!inst
.operands
[j
].isreg
13617 && inst
.operands
[j
].isscalar
))
13627 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
13628 /* We've matched all the entries in the shape table, and we don't
13629 have any left over operands which have not been matched. */
13635 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
13636 first_error (_("invalid instruction shape"));
13641 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13642 means the Q bit should be set). */
13645 neon_quad (enum neon_shape shape
)
13647 return neon_shape_class
[shape
] == SC_QUAD
;
13651 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
13654 /* Allow modification to be made to types which are constrained to be
13655 based on the key element, based on bits set alongside N_EQK. */
13656 if ((typebits
& N_EQK
) != 0)
13658 if ((typebits
& N_HLF
) != 0)
13660 else if ((typebits
& N_DBL
) != 0)
13662 if ((typebits
& N_SGN
) != 0)
13663 *g_type
= NT_signed
;
13664 else if ((typebits
& N_UNS
) != 0)
13665 *g_type
= NT_unsigned
;
13666 else if ((typebits
& N_INT
) != 0)
13667 *g_type
= NT_integer
;
13668 else if ((typebits
& N_FLT
) != 0)
13669 *g_type
= NT_float
;
13670 else if ((typebits
& N_SIZ
) != 0)
13671 *g_type
= NT_untyped
;
13675 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13676 operand type, i.e. the single type specified in a Neon instruction when it
13677 is the only one given. */
13679 static struct neon_type_el
13680 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
13682 struct neon_type_el dest
= *key
;
13684 gas_assert ((thisarg
& N_EQK
) != 0);
13686 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
13691 /* Convert Neon type and size into compact bitmask representation. */
13693 static enum neon_type_mask
13694 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
13701 case 8: return N_8
;
13702 case 16: return N_16
;
13703 case 32: return N_32
;
13704 case 64: return N_64
;
13712 case 8: return N_I8
;
13713 case 16: return N_I16
;
13714 case 32: return N_I32
;
13715 case 64: return N_I64
;
13723 case 16: return N_F16
;
13724 case 32: return N_F32
;
13725 case 64: return N_F64
;
13733 case 8: return N_P8
;
13734 case 16: return N_P16
;
13735 case 64: return N_P64
;
13743 case 8: return N_S8
;
13744 case 16: return N_S16
;
13745 case 32: return N_S32
;
13746 case 64: return N_S64
;
13754 case 8: return N_U8
;
13755 case 16: return N_U16
;
13756 case 32: return N_U32
;
13757 case 64: return N_U64
;
13768 /* Convert compact Neon bitmask type representation to a type and size. Only
13769 handles the case where a single bit is set in the mask. */
13772 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
13773 enum neon_type_mask mask
)
13775 if ((mask
& N_EQK
) != 0)
13778 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
13780 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
13782 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
13784 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
13789 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
13791 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
13792 *type
= NT_unsigned
;
13793 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
13794 *type
= NT_integer
;
13795 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
13796 *type
= NT_untyped
;
13797 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
13799 else if ((mask
& (N_F_ALL
)) != 0)
13807 /* Modify a bitmask of allowed types. This is only needed for type
13811 modify_types_allowed (unsigned allowed
, unsigned mods
)
13814 enum neon_el_type type
;
13820 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
13822 if (el_type_of_type_chk (&type
, &size
,
13823 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
13825 neon_modify_type_size (mods
, &type
, &size
);
13826 destmask
|= type_chk_of_el_type (type
, size
);
13833 /* Check type and return type classification.
13834 The manual states (paraphrase): If one datatype is given, it indicates the
13836 - the second operand, if there is one
13837 - the operand, if there is no second operand
13838 - the result, if there are no operands.
13839 This isn't quite good enough though, so we use a concept of a "key" datatype
13840 which is set on a per-instruction basis, which is the one which matters when
13841 only one data type is written.
13842 Note: this function has side-effects (e.g. filling in missing operands). All
13843 Neon instructions should call it before performing bit encoding. */
13845 static struct neon_type_el
13846 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
13849 unsigned i
, pass
, key_el
= 0;
13850 unsigned types
[NEON_MAX_TYPE_ELS
];
13851 enum neon_el_type k_type
= NT_invtype
;
13852 unsigned k_size
= -1u;
13853 struct neon_type_el badtype
= {NT_invtype
, -1};
13854 unsigned key_allowed
= 0;
13856 /* Optional registers in Neon instructions are always (not) in operand 1.
13857 Fill in the missing operand here, if it was omitted. */
13858 if (els
> 1 && !inst
.operands
[1].present
)
13859 inst
.operands
[1] = inst
.operands
[0];
13861 /* Suck up all the varargs. */
13863 for (i
= 0; i
< els
; i
++)
13865 unsigned thisarg
= va_arg (ap
, unsigned);
13866 if (thisarg
== N_IGNORE_TYPE
)
13871 types
[i
] = thisarg
;
13872 if ((thisarg
& N_KEY
) != 0)
13877 if (inst
.vectype
.elems
> 0)
13878 for (i
= 0; i
< els
; i
++)
13879 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
13881 first_error (_("types specified in both the mnemonic and operands"));
13885 /* Duplicate inst.vectype elements here as necessary.
13886 FIXME: No idea if this is exactly the same as the ARM assembler,
13887 particularly when an insn takes one register and one non-register
13889 if (inst
.vectype
.elems
== 1 && els
> 1)
13892 inst
.vectype
.elems
= els
;
13893 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
13894 for (j
= 0; j
< els
; j
++)
13896 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13899 else if (inst
.vectype
.elems
== 0 && els
> 0)
13902 /* No types were given after the mnemonic, so look for types specified
13903 after each operand. We allow some flexibility here; as long as the
13904 "key" operand has a type, we can infer the others. */
13905 for (j
= 0; j
< els
; j
++)
13906 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
13907 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
13909 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
13911 for (j
= 0; j
< els
; j
++)
13912 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
13913 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13918 first_error (_("operand types can't be inferred"));
13922 else if (inst
.vectype
.elems
!= els
)
13924 first_error (_("type specifier has the wrong number of parts"));
13928 for (pass
= 0; pass
< 2; pass
++)
13930 for (i
= 0; i
< els
; i
++)
13932 unsigned thisarg
= types
[i
];
13933 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
13934 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
13935 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
13936 unsigned g_size
= inst
.vectype
.el
[i
].size
;
13938 /* Decay more-specific signed & unsigned types to sign-insensitive
13939 integer types if sign-specific variants are unavailable. */
13940 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
13941 && (types_allowed
& N_SU_ALL
) == 0)
13942 g_type
= NT_integer
;
13944 /* If only untyped args are allowed, decay any more specific types to
13945 them. Some instructions only care about signs for some element
13946 sizes, so handle that properly. */
13947 if (((types_allowed
& N_UNT
) == 0)
13948 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
13949 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
13950 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
13951 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
13952 g_type
= NT_untyped
;
13956 if ((thisarg
& N_KEY
) != 0)
13960 key_allowed
= thisarg
& ~N_KEY
;
13962 /* Check architecture constraint on FP16 extension. */
13964 && k_type
== NT_float
13965 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
13967 inst
.error
= _(BAD_FP16
);
13974 if ((thisarg
& N_VFP
) != 0)
13976 enum neon_shape_el regshape
;
13977 unsigned regwidth
, match
;
13979 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
13982 first_error (_("invalid instruction shape"));
13985 regshape
= neon_shape_tab
[ns
].el
[i
];
13986 regwidth
= neon_shape_el_size
[regshape
];
13988 /* In VFP mode, operands must match register widths. If we
13989 have a key operand, use its width, else use the width of
13990 the current operand. */
13996 /* FP16 will use a single precision register. */
13997 if (regwidth
== 32 && match
== 16)
13999 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14003 inst
.error
= _(BAD_FP16
);
14008 if (regwidth
!= match
)
14010 first_error (_("operand size must match register width"));
14015 if ((thisarg
& N_EQK
) == 0)
14017 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14019 if ((given_type
& types_allowed
) == 0)
14021 first_error (_("bad type in Neon instruction"));
14027 enum neon_el_type mod_k_type
= k_type
;
14028 unsigned mod_k_size
= k_size
;
14029 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14030 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14032 first_error (_("inconsistent types in Neon instruction"));
14040 return inst
.vectype
.el
[key_el
];
14043 /* Neon-style VFP instruction forwarding. */
14045 /* Thumb VFP instructions have 0xE in the condition field. */
14048 do_vfp_cond_or_thumb (void)
14053 inst
.instruction
|= 0xe0000000;
14055 inst
.instruction
|= inst
.cond
<< 28;
14058 /* Look up and encode a simple mnemonic, for use as a helper function for the
14059 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14060 etc. It is assumed that operand parsing has already been done, and that the
14061 operands are in the form expected by the given opcode (this isn't necessarily
14062 the same as the form in which they were parsed, hence some massaging must
14063 take place before this function is called).
14064 Checks current arch version against that in the looked-up opcode. */
14067 do_vfp_nsyn_opcode (const char *opname
)
14069 const struct asm_opcode
*opcode
;
14071 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14076 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14077 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14084 inst
.instruction
= opcode
->tvalue
;
14085 opcode
->tencode ();
14089 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14090 opcode
->aencode ();
14095 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14097 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14099 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14102 do_vfp_nsyn_opcode ("fadds");
14104 do_vfp_nsyn_opcode ("fsubs");
14106 /* ARMv8.2 fp16 instruction. */
14108 do_scalar_fp16_v82_encode ();
14113 do_vfp_nsyn_opcode ("faddd");
14115 do_vfp_nsyn_opcode ("fsubd");
14119 /* Check operand types to see if this is a VFP instruction, and if so call
14123 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14125 enum neon_shape rs
;
14126 struct neon_type_el et
;
14131 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14132 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14136 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14137 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14138 N_F_ALL
| N_KEY
| N_VFP
);
14145 if (et
.type
!= NT_invtype
)
14156 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14158 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14160 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14163 do_vfp_nsyn_opcode ("fmacs");
14165 do_vfp_nsyn_opcode ("fnmacs");
14167 /* ARMv8.2 fp16 instruction. */
14169 do_scalar_fp16_v82_encode ();
14174 do_vfp_nsyn_opcode ("fmacd");
14176 do_vfp_nsyn_opcode ("fnmacd");
14181 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14183 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14185 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14188 do_vfp_nsyn_opcode ("ffmas");
14190 do_vfp_nsyn_opcode ("ffnmas");
14192 /* ARMv8.2 fp16 instruction. */
14194 do_scalar_fp16_v82_encode ();
14199 do_vfp_nsyn_opcode ("ffmad");
14201 do_vfp_nsyn_opcode ("ffnmad");
14206 do_vfp_nsyn_mul (enum neon_shape rs
)
14208 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14210 do_vfp_nsyn_opcode ("fmuls");
14212 /* ARMv8.2 fp16 instruction. */
14214 do_scalar_fp16_v82_encode ();
14217 do_vfp_nsyn_opcode ("fmuld");
14221 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14223 int is_neg
= (inst
.instruction
& 0x80) != 0;
14224 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14226 if (rs
== NS_FF
|| rs
== NS_HH
)
14229 do_vfp_nsyn_opcode ("fnegs");
14231 do_vfp_nsyn_opcode ("fabss");
14233 /* ARMv8.2 fp16 instruction. */
14235 do_scalar_fp16_v82_encode ();
14240 do_vfp_nsyn_opcode ("fnegd");
14242 do_vfp_nsyn_opcode ("fabsd");
14246 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14247 insns belong to Neon, and are handled elsewhere. */
14250 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14252 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14256 do_vfp_nsyn_opcode ("fldmdbs");
14258 do_vfp_nsyn_opcode ("fldmias");
14263 do_vfp_nsyn_opcode ("fstmdbs");
14265 do_vfp_nsyn_opcode ("fstmias");
14270 do_vfp_nsyn_sqrt (void)
14272 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14273 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14275 if (rs
== NS_FF
|| rs
== NS_HH
)
14277 do_vfp_nsyn_opcode ("fsqrts");
14279 /* ARMv8.2 fp16 instruction. */
14281 do_scalar_fp16_v82_encode ();
14284 do_vfp_nsyn_opcode ("fsqrtd");
14288 do_vfp_nsyn_div (void)
14290 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14291 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14292 N_F_ALL
| N_KEY
| N_VFP
);
14294 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14296 do_vfp_nsyn_opcode ("fdivs");
14298 /* ARMv8.2 fp16 instruction. */
14300 do_scalar_fp16_v82_encode ();
14303 do_vfp_nsyn_opcode ("fdivd");
14307 do_vfp_nsyn_nmul (void)
14309 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14310 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14311 N_F_ALL
| N_KEY
| N_VFP
);
14313 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14315 NEON_ENCODE (SINGLE
, inst
);
14316 do_vfp_sp_dyadic ();
14318 /* ARMv8.2 fp16 instruction. */
14320 do_scalar_fp16_v82_encode ();
14324 NEON_ENCODE (DOUBLE
, inst
);
14325 do_vfp_dp_rd_rn_rm ();
14327 do_vfp_cond_or_thumb ();
14332 do_vfp_nsyn_cmp (void)
14334 enum neon_shape rs
;
14335 if (inst
.operands
[1].isreg
)
14337 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14338 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14340 if (rs
== NS_FF
|| rs
== NS_HH
)
14342 NEON_ENCODE (SINGLE
, inst
);
14343 do_vfp_sp_monadic ();
14347 NEON_ENCODE (DOUBLE
, inst
);
14348 do_vfp_dp_rd_rm ();
14353 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
14354 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
14356 switch (inst
.instruction
& 0x0fffffff)
14359 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14362 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14368 if (rs
== NS_FI
|| rs
== NS_HI
)
14370 NEON_ENCODE (SINGLE
, inst
);
14371 do_vfp_sp_compare_z ();
14375 NEON_ENCODE (DOUBLE
, inst
);
14379 do_vfp_cond_or_thumb ();
14381 /* ARMv8.2 fp16 instruction. */
14382 if (rs
== NS_HI
|| rs
== NS_HH
)
14383 do_scalar_fp16_v82_encode ();
14387 nsyn_insert_sp (void)
14389 inst
.operands
[1] = inst
.operands
[0];
14390 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14391 inst
.operands
[0].reg
= REG_SP
;
14392 inst
.operands
[0].isreg
= 1;
14393 inst
.operands
[0].writeback
= 1;
14394 inst
.operands
[0].present
= 1;
14398 do_vfp_nsyn_push (void)
14402 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14403 _("register list must contain at least 1 and at most 16 "
14406 if (inst
.operands
[1].issingle
)
14407 do_vfp_nsyn_opcode ("fstmdbs");
14409 do_vfp_nsyn_opcode ("fstmdbd");
14413 do_vfp_nsyn_pop (void)
14417 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14418 _("register list must contain at least 1 and at most 16 "
14421 if (inst
.operands
[1].issingle
)
14422 do_vfp_nsyn_opcode ("fldmias");
14424 do_vfp_nsyn_opcode ("fldmiad");
14427 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14428 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14431 neon_dp_fixup (struct arm_it
* insn
)
14433 unsigned int i
= insn
->instruction
;
14438 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14449 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
14461 #define LOW4(R) ((R) & 0xf)
14462 #define HI1(R) (((R) >> 4) & 1)
14464 /* Encode insns with bit pattern:
14466 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14467 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14469 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14470 different meaning for some instruction. */
14473 neon_three_same (int isquad
, int ubit
, int size
)
14475 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14476 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14477 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14478 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14479 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14480 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14481 inst
.instruction
|= (isquad
!= 0) << 6;
14482 inst
.instruction
|= (ubit
!= 0) << 24;
14484 inst
.instruction
|= neon_logbits (size
) << 20;
14486 neon_dp_fixup (&inst
);
14489 /* Encode instructions of the form:
14491 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14492 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14494 Don't write size if SIZE == -1. */
14497 neon_two_same (int qbit
, int ubit
, int size
)
14499 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14500 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14501 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14502 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14503 inst
.instruction
|= (qbit
!= 0) << 6;
14504 inst
.instruction
|= (ubit
!= 0) << 24;
14507 inst
.instruction
|= neon_logbits (size
) << 18;
14509 neon_dp_fixup (&inst
);
14512 /* Neon instruction encoders, in approximate order of appearance. */
14515 do_neon_dyadic_i_su (void)
14517 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14518 struct neon_type_el et
= neon_check_type (3, rs
,
14519 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14520 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14524 do_neon_dyadic_i64_su (void)
14526 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14527 struct neon_type_el et
= neon_check_type (3, rs
,
14528 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14529 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14533 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14536 unsigned size
= et
.size
>> 3;
14537 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14538 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14539 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14540 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14541 inst
.instruction
|= (isquad
!= 0) << 6;
14542 inst
.instruction
|= immbits
<< 16;
14543 inst
.instruction
|= (size
>> 3) << 7;
14544 inst
.instruction
|= (size
& 0x7) << 19;
14546 inst
.instruction
|= (uval
!= 0) << 24;
14548 neon_dp_fixup (&inst
);
14552 do_neon_shl_imm (void)
14554 if (!inst
.operands
[2].isreg
)
14556 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14557 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14558 int imm
= inst
.operands
[2].imm
;
14560 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14561 _("immediate out of range for shift"));
14562 NEON_ENCODE (IMMED
, inst
);
14563 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14567 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14568 struct neon_type_el et
= neon_check_type (3, rs
,
14569 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14572 /* VSHL/VQSHL 3-register variants have syntax such as:
14574 whereas other 3-register operations encoded by neon_three_same have
14577 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14579 tmp
= inst
.operands
[2].reg
;
14580 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14581 inst
.operands
[1].reg
= tmp
;
14582 NEON_ENCODE (INTEGER
, inst
);
14583 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14588 do_neon_qshl_imm (void)
14590 if (!inst
.operands
[2].isreg
)
14592 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14593 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14594 int imm
= inst
.operands
[2].imm
;
14596 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14597 _("immediate out of range for shift"));
14598 NEON_ENCODE (IMMED
, inst
);
14599 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
14603 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14604 struct neon_type_el et
= neon_check_type (3, rs
,
14605 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14608 /* See note in do_neon_shl_imm. */
14609 tmp
= inst
.operands
[2].reg
;
14610 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14611 inst
.operands
[1].reg
= tmp
;
14612 NEON_ENCODE (INTEGER
, inst
);
14613 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14618 do_neon_rshl (void)
14620 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14621 struct neon_type_el et
= neon_check_type (3, rs
,
14622 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14625 tmp
= inst
.operands
[2].reg
;
14626 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14627 inst
.operands
[1].reg
= tmp
;
14628 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14632 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
14634 /* Handle .I8 pseudo-instructions. */
14637 /* Unfortunately, this will make everything apart from zero out-of-range.
14638 FIXME is this the intended semantics? There doesn't seem much point in
14639 accepting .I8 if so. */
14640 immediate
|= immediate
<< 8;
14646 if (immediate
== (immediate
& 0x000000ff))
14648 *immbits
= immediate
;
14651 else if (immediate
== (immediate
& 0x0000ff00))
14653 *immbits
= immediate
>> 8;
14656 else if (immediate
== (immediate
& 0x00ff0000))
14658 *immbits
= immediate
>> 16;
14661 else if (immediate
== (immediate
& 0xff000000))
14663 *immbits
= immediate
>> 24;
14666 if ((immediate
& 0xffff) != (immediate
>> 16))
14667 goto bad_immediate
;
14668 immediate
&= 0xffff;
14671 if (immediate
== (immediate
& 0x000000ff))
14673 *immbits
= immediate
;
14676 else if (immediate
== (immediate
& 0x0000ff00))
14678 *immbits
= immediate
>> 8;
14683 first_error (_("immediate value out of range"));
14688 do_neon_logic (void)
14690 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
14692 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14693 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14694 /* U bit and size field were set as part of the bitmask. */
14695 NEON_ENCODE (INTEGER
, inst
);
14696 neon_three_same (neon_quad (rs
), 0, -1);
14700 const int three_ops_form
= (inst
.operands
[2].present
14701 && !inst
.operands
[2].isreg
);
14702 const int immoperand
= (three_ops_form
? 2 : 1);
14703 enum neon_shape rs
= (three_ops_form
14704 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
14705 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
14706 struct neon_type_el et
= neon_check_type (2, rs
,
14707 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14708 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
14712 if (et
.type
== NT_invtype
)
14715 if (three_ops_form
)
14716 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14717 _("first and second operands shall be the same register"));
14719 NEON_ENCODE (IMMED
, inst
);
14721 immbits
= inst
.operands
[immoperand
].imm
;
14724 /* .i64 is a pseudo-op, so the immediate must be a repeating
14726 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
14727 inst
.operands
[immoperand
].reg
: 0))
14729 /* Set immbits to an invalid constant. */
14730 immbits
= 0xdeadbeef;
14737 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14741 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14745 /* Pseudo-instruction for VBIC. */
14746 neon_invert_size (&immbits
, 0, et
.size
);
14747 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14751 /* Pseudo-instruction for VORR. */
14752 neon_invert_size (&immbits
, 0, et
.size
);
14753 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14763 inst
.instruction
|= neon_quad (rs
) << 6;
14764 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14765 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14766 inst
.instruction
|= cmode
<< 8;
14767 neon_write_immbits (immbits
);
14769 neon_dp_fixup (&inst
);
14774 do_neon_bitfield (void)
14776 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14777 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14778 neon_three_same (neon_quad (rs
), 0, -1);
14782 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
14785 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14786 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
14788 if (et
.type
== NT_float
)
14790 NEON_ENCODE (FLOAT
, inst
);
14791 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
14795 NEON_ENCODE (INTEGER
, inst
);
14796 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
14801 do_neon_dyadic_if_su (void)
14803 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14807 do_neon_dyadic_if_su_d (void)
14809 /* This version only allow D registers, but that constraint is enforced during
14810 operand parsing so we don't need to do anything extra here. */
14811 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14815 do_neon_dyadic_if_i_d (void)
14817 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14818 affected if we specify unsigned args. */
14819 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Flags for vfp_or_neon_is_neon, selecting which checks to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
14829 /* Call this function if an instruction which may have belonged to the VFP or
14830 Neon instruction sets, but turned out to be a Neon instruction (due to the
14831 operand types involved, etc.). We have to check and/or fix-up a couple of
14834 - Make sure the user hasn't attempted to make a Neon instruction
14836 - Alter the value in the condition code field if necessary.
14837 - Make sure that the arch supports Neon instructions.
14839 Which of these operations take place depends on bits from enum
14840 vfp_or_neon_is_neon_bits.
14842 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14843 current instruction's condition is COND_ALWAYS, the condition field is
14844 changed to inst.uncond_value. This is necessary because instructions shared
14845 between VFP and Neon may be conditional for the VFP variants only, and the
14846 unconditional Neon version must have, e.g., 0xF in the condition field. */
14849 vfp_or_neon_is_neon (unsigned check
)
14851 /* Conditions are always legal in Thumb mode (IT blocks). */
14852 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
14854 if (inst
.cond
!= COND_ALWAYS
)
14856 first_error (_(BAD_COND
));
14859 if (inst
.uncond_value
!= -1)
14860 inst
.instruction
|= inst
.uncond_value
<< 28;
14863 if ((check
& NEON_CHECK_ARCH
)
14864 && !mark_feature_used (&fpu_neon_ext_v1
))
14866 first_error (_(BAD_FPU
));
14870 if ((check
& NEON_CHECK_ARCH8
)
14871 && !mark_feature_used (&fpu_neon_ext_armv8
))
14873 first_error (_(BAD_FPU
));
14881 do_neon_addsub_if_i (void)
14883 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
14886 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14889 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14890 affected if we specify unsigned args. */
14891 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
14894 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14896 V<op> A,B (A is operand 0, B is operand 2)
14901 so handle that case specially. */
14904 neon_exchange_operands (void)
14906 if (inst
.operands
[1].present
)
14908 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
14910 /* Swap operands[1] and operands[2]. */
14911 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
14912 inst
.operands
[1] = inst
.operands
[2];
14913 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
14918 inst
.operands
[1] = inst
.operands
[2];
14919 inst
.operands
[2] = inst
.operands
[0];
14924 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
14926 if (inst
.operands
[2].isreg
)
14929 neon_exchange_operands ();
14930 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
14934 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14935 struct neon_type_el et
= neon_check_type (2, rs
,
14936 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
14938 NEON_ENCODE (IMMED
, inst
);
14939 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14940 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14941 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14942 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14943 inst
.instruction
|= neon_quad (rs
) << 6;
14944 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14945 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14947 neon_dp_fixup (&inst
);
14954 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
14958 do_neon_cmp_inv (void)
14960 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
14966 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and
   the index in M.  Returns the packed 5-bit encoding, or reports an error
   and returns 0 when the scalar is out of range for ELSIZE.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    bad_scalar:
    default:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
15001 /* Encode multiply / multiply-accumulate scalar instructions. */
15004 neon_mul_mac (struct neon_type_el et
, int ubit
)
15008 /* Give a more helpful error message if we have an invalid type. */
15009 if (et
.type
== NT_invtype
)
15012 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
15013 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15014 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15015 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15016 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15017 inst
.instruction
|= LOW4 (scalar
);
15018 inst
.instruction
|= HI1 (scalar
) << 5;
15019 inst
.instruction
|= (et
.type
== NT_float
) << 8;
15020 inst
.instruction
|= neon_logbits (et
.size
) << 20;
15021 inst
.instruction
|= (ubit
!= 0) << 24;
15023 neon_dp_fixup (&inst
);
15027 do_neon_mac_maybe_scalar (void)
15029 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
15032 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15035 if (inst
.operands
[2].isscalar
)
15037 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15038 struct neon_type_el et
= neon_check_type (3, rs
,
15039 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
15040 NEON_ENCODE (SCALAR
, inst
);
15041 neon_mul_mac (et
, neon_quad (rs
));
15045 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15046 affected if we specify unsigned args. */
15047 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15052 do_neon_fmac (void)
15054 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
15057 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15060 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15066 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15067 struct neon_type_el et
= neon_check_type (3, rs
,
15068 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15069 neon_three_same (neon_quad (rs
), 0, et
.size
);
15072 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15073 same types as the MAC equivalents. The polynomial type for this instruction
15074 is encoded the same as the integer type. */
15079 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
15082 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15085 if (inst
.operands
[2].isscalar
)
15086 do_neon_mac_maybe_scalar ();
15088 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
15092 do_neon_qdmulh (void)
15094 if (inst
.operands
[2].isscalar
)
15096 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15097 struct neon_type_el et
= neon_check_type (3, rs
,
15098 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15099 NEON_ENCODE (SCALAR
, inst
);
15100 neon_mul_mac (et
, neon_quad (rs
));
15104 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15105 struct neon_type_el et
= neon_check_type (3, rs
,
15106 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15107 NEON_ENCODE (INTEGER
, inst
);
15108 /* The U bit (rounding) comes from bit mask. */
15109 neon_three_same (neon_quad (rs
), 0, et
.size
);
15114 do_neon_qrdmlah (void)
15116 /* Check we're on the correct architecture. */
15117 if (!mark_feature_used (&fpu_neon_ext_armv8
))
15119 _("instruction form not available on this architecture.");
15120 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
15122 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15123 record_feature_use (&fpu_neon_ext_v8_1
);
15126 if (inst
.operands
[2].isscalar
)
15128 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15129 struct neon_type_el et
= neon_check_type (3, rs
,
15130 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15131 NEON_ENCODE (SCALAR
, inst
);
15132 neon_mul_mac (et
, neon_quad (rs
));
15136 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15137 struct neon_type_el et
= neon_check_type (3, rs
,
15138 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15139 NEON_ENCODE (INTEGER
, inst
);
15140 /* The U bit (rounding) comes from bit mask. */
15141 neon_three_same (neon_quad (rs
), 0, et
.size
);
15146 do_neon_fcmp_absolute (void)
15148 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15149 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15150 N_F_16_32
| N_KEY
);
15151 /* Size field comes from bit mask. */
15152 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
/* VACLE/VACLT: absolute compare with the operands exchanged.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15163 do_neon_step (void)
15165 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15166 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15167 N_F_16_32
| N_KEY
);
15168 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15172 do_neon_abs_neg (void)
15174 enum neon_shape rs
;
15175 struct neon_type_el et
;
15177 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
15180 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15183 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15184 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
15186 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15187 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15188 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15189 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15190 inst
.instruction
|= neon_quad (rs
) << 6;
15191 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15192 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15194 neon_dp_fixup (&inst
);
15200 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15201 struct neon_type_el et
= neon_check_type (2, rs
,
15202 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15203 int imm
= inst
.operands
[2].imm
;
15204 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15205 _("immediate out of range for insert"));
15206 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15212 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15213 struct neon_type_el et
= neon_check_type (2, rs
,
15214 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15215 int imm
= inst
.operands
[2].imm
;
15216 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15217 _("immediate out of range for insert"));
15218 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
15222 do_neon_qshlu_imm (void)
15224 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15225 struct neon_type_el et
= neon_check_type (2, rs
,
15226 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
15227 int imm
= inst
.operands
[2].imm
;
15228 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15229 _("immediate out of range for shift"));
15230 /* Only encodes the 'U present' variant of the instruction.
15231 In this case, signed types have OP (bit 8) set to 0.
15232 Unsigned types have OP set to 1. */
15233 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
15234 /* The rest of the bits are the same as other immediate shifts. */
15235 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15239 do_neon_qmovn (void)
15241 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15242 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15243 /* Saturating move where operands can be signed or unsigned, and the
15244 destination has the same signedness. */
15245 NEON_ENCODE (INTEGER
, inst
);
15246 if (et
.type
== NT_unsigned
)
15247 inst
.instruction
|= 0xc0;
15249 inst
.instruction
|= 0x80;
15250 neon_two_same (0, 1, et
.size
/ 2);
15254 do_neon_qmovun (void)
15256 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15257 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15258 /* Saturating move with unsigned results. Operands must be signed. */
15259 NEON_ENCODE (INTEGER
, inst
);
15260 neon_two_same (0, 1, et
.size
/ 2);
15264 do_neon_rshift_sat_narrow (void)
15266 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15267 or unsigned. If operands are unsigned, results must also be unsigned. */
15268 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15269 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15270 int imm
= inst
.operands
[2].imm
;
15271 /* This gets the bounds check, size encoding and immediate bits calculation
15275 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15276 VQMOVN.I<size> <Dd>, <Qm>. */
15279 inst
.operands
[2].present
= 0;
15280 inst
.instruction
= N_MNEM_vqmovn
;
15285 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15286 _("immediate out of range"));
15287 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15291 do_neon_rshift_sat_narrow_u (void)
15293 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15294 or unsigned. If operands are unsigned, results must also be unsigned. */
15295 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15296 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15297 int imm
= inst
.operands
[2].imm
;
15298 /* This gets the bounds check, size encoding and immediate bits calculation
15302 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15303 VQMOVUN.I<size> <Dd>, <Qm>. */
15306 inst
.operands
[2].present
= 0;
15307 inst
.instruction
= N_MNEM_vqmovun
;
15312 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15313 _("immediate out of range"));
15314 /* FIXME: The manual is kind of unclear about what value U should have in
15315 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15317 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15321 do_neon_movn (void)
15323 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15324 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15325 NEON_ENCODE (INTEGER
, inst
);
15326 neon_two_same (0, 1, et
.size
/ 2);
15330 do_neon_rshift_narrow (void)
15332 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15333 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15334 int imm
= inst
.operands
[2].imm
;
15335 /* This gets the bounds check, size encoding and immediate bits calculation
15339 /* If immediate is zero then we are a pseudo-instruction for
15340 VMOVN.I<size> <Dd>, <Qm> */
15343 inst
.operands
[2].present
= 0;
15344 inst
.instruction
= N_MNEM_vmovn
;
15349 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15350 _("immediate out of range for narrowing operation"));
15351 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15355 do_neon_shll (void)
15357 /* FIXME: Type checking when lengthening. */
15358 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15359 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15360 unsigned imm
= inst
.operands
[2].imm
;
15362 if (imm
== et
.size
)
15364 /* Maximum shift variant. */
15365 NEON_ENCODE (INTEGER
, inst
);
15366 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15367 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15368 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15369 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15370 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15372 neon_dp_fixup (&inst
);
15376 /* A more-specific type check for non-max versions. */
15377 et
= neon_check_type (2, NS_QDI
,
15378 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15379 NEON_ENCODE (IMMED
, inst
);
15380 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  Each CVT_VAR entry gives: flavour name, "to"
   type, "from" type, register-class bits, and the VFP nsyn opcode names for
   the bitshift, plain, and round-to-zero forms (NULL when unavailable).  */

#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor    \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)

#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15436 static enum neon_cvt_flavour
15437 get_neon_cvt_flavour (enum neon_shape rs
)
15439 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
15440 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
15441 if (et.type != NT_invtype) \
15443 inst.error = NULL; \
15444 return (neon_cvt_flavour_##C); \
15447 struct neon_type_el et
;
15448 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
15449 || rs
== NS_FF
) ? N_VFP
: 0;
15450 /* The instruction versions which take an immediate take one register
15451 argument, which is extended to the width of the full register. Thus the
15452 "source" and "destination" registers must have the same width. Hack that
15453 here by making the size equal to the key (wider, in this case) operand. */
15454 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
15458 return neon_cvt_flavour_invalid
;
15473 /* Neon-syntax VFP conversions. */
15476 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
15478 const char *opname
= 0;
15480 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
15481 || rs
== NS_FHI
|| rs
== NS_HFI
)
15483 /* Conversions with immediate bitshift. */
15484 const char *enc
[] =
15486 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15492 if (flavour
< (int) ARRAY_SIZE (enc
))
15494 opname
= enc
[flavour
];
15495 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15496 _("operands 0 and 1 must be the same register"));
15497 inst
.operands
[1] = inst
.operands
[2];
15498 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
15503 /* Conversions without bitshift. */
15504 const char *enc
[] =
15506 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15512 if (flavour
< (int) ARRAY_SIZE (enc
))
15513 opname
= enc
[flavour
];
15517 do_vfp_nsyn_opcode (opname
);
15519 /* ARMv8.2 fp16 VCVT instruction. */
15520 if (flavour
== neon_cvt_flavour_s32_f16
15521 || flavour
== neon_cvt_flavour_u32_f16
15522 || flavour
== neon_cvt_flavour_f16_u32
15523 || flavour
== neon_cvt_flavour_f16_s32
)
15524 do_scalar_fp16_v82_encode ();
15528 do_vfp_nsyn_cvtz (void)
15530 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
15531 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15532 const char *enc
[] =
15534 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15540 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
15541 do_vfp_nsyn_opcode (enc
[flavour
]);
15545 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
15546 enum neon_cvt_mode mode
)
15551 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15552 D register operands. */
15553 if (flavour
== neon_cvt_flavour_s32_f64
15554 || flavour
== neon_cvt_flavour_u32_f64
)
15555 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15558 if (flavour
== neon_cvt_flavour_s32_f16
15559 || flavour
== neon_cvt_flavour_u32_f16
)
15560 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
15563 set_it_insn_type (OUTSIDE_IT_INSN
);
15567 case neon_cvt_flavour_s32_f64
:
15571 case neon_cvt_flavour_s32_f32
:
15575 case neon_cvt_flavour_s32_f16
:
15579 case neon_cvt_flavour_u32_f64
:
15583 case neon_cvt_flavour_u32_f32
:
15587 case neon_cvt_flavour_u32_f16
:
15592 first_error (_("invalid instruction shape"));
15598 case neon_cvt_mode_a
: rm
= 0; break;
15599 case neon_cvt_mode_n
: rm
= 1; break;
15600 case neon_cvt_mode_p
: rm
= 2; break;
15601 case neon_cvt_mode_m
: rm
= 3; break;
15602 default: first_error (_("invalid rounding mode")); return;
15605 NEON_ENCODE (FPV8
, inst
);
15606 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
15607 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
15608 inst
.instruction
|= sz
<< 8;
15610 /* ARMv8.2 fp16 VCVT instruction. */
15611 if (flavour
== neon_cvt_flavour_s32_f16
15612 ||flavour
== neon_cvt_flavour_u32_f16
)
15613 do_scalar_fp16_v82_encode ();
15614 inst
.instruction
|= op
<< 7;
15615 inst
.instruction
|= rm
<< 16;
15616 inst
.instruction
|= 0xf0000000;
15617 inst
.is_neon
= TRUE
;
15621 do_neon_cvt_1 (enum neon_cvt_mode mode
)
15623 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
15624 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
15625 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
15627 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15629 if (flavour
== neon_cvt_flavour_invalid
)
15632 /* PR11109: Handle round-to-zero for VCVT conversions. */
15633 if (mode
== neon_cvt_mode_z
15634 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
15635 && (flavour
== neon_cvt_flavour_s16_f16
15636 || flavour
== neon_cvt_flavour_u16_f16
15637 || flavour
== neon_cvt_flavour_s32_f32
15638 || flavour
== neon_cvt_flavour_u32_f32
15639 || flavour
== neon_cvt_flavour_s32_f64
15640 || flavour
== neon_cvt_flavour_u32_f64
)
15641 && (rs
== NS_FD
|| rs
== NS_FF
))
15643 do_vfp_nsyn_cvtz ();
15647 /* ARMv8.2 fp16 VCVT conversions. */
15648 if (mode
== neon_cvt_mode_z
15649 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
15650 && (flavour
== neon_cvt_flavour_s32_f16
15651 || flavour
== neon_cvt_flavour_u32_f16
)
15654 do_vfp_nsyn_cvtz ();
15655 do_scalar_fp16_v82_encode ();
15659 /* VFP rather than Neon conversions. */
15660 if (flavour
>= neon_cvt_flavour_first_fp
)
15662 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15663 do_vfp_nsyn_cvt (rs
, flavour
);
15665 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15676 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
15677 0x0000100, 0x1000100, 0x0, 0x1000000};
15679 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15682 /* Fixed-point conversion with #0 immediate is encoded as an
15683 integer conversion. */
15684 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
15686 NEON_ENCODE (IMMED
, inst
);
15687 if (flavour
!= neon_cvt_flavour_invalid
)
15688 inst
.instruction
|= enctab
[flavour
];
15689 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15690 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15691 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15692 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15693 inst
.instruction
|= neon_quad (rs
) << 6;
15694 inst
.instruction
|= 1 << 21;
15695 if (flavour
< neon_cvt_flavour_s16_f16
)
15697 inst
.instruction
|= 1 << 21;
15698 immbits
= 32 - inst
.operands
[2].imm
;
15699 inst
.instruction
|= immbits
<< 16;
15703 inst
.instruction
|= 3 << 20;
15704 immbits
= 16 - inst
.operands
[2].imm
;
15705 inst
.instruction
|= immbits
<< 16;
15706 inst
.instruction
&= ~(1 << 9);
15709 neon_dp_fixup (&inst
);
15715 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
15717 NEON_ENCODE (FLOAT
, inst
);
15718 set_it_insn_type (OUTSIDE_IT_INSN
);
15720 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
15723 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15724 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15725 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15726 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15727 inst
.instruction
|= neon_quad (rs
) << 6;
15728 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
15729 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
15730 inst
.instruction
|= mode
<< 8;
15731 if (flavour
== neon_cvt_flavour_u16_f16
15732 || flavour
== neon_cvt_flavour_s16_f16
)
15733 /* Mask off the original size bits and reencode them. */
15734 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
15737 inst
.instruction
|= 0xfc000000;
15739 inst
.instruction
|= 0xf0000000;
15745 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
15746 0x100, 0x180, 0x0, 0x080};
15748 NEON_ENCODE (INTEGER
, inst
);
15750 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15753 if (flavour
!= neon_cvt_flavour_invalid
)
15754 inst
.instruction
|= enctab
[flavour
];
15756 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15757 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15758 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15759 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15760 inst
.instruction
|= neon_quad (rs
) << 6;
15761 if (flavour
>= neon_cvt_flavour_s16_f16
15762 && flavour
<= neon_cvt_flavour_f16_u16
)
15763 /* Half precision. */
15764 inst
.instruction
|= 1 << 18;
15766 inst
.instruction
|= 2 << 18;
15768 neon_dp_fixup (&inst
);
15773 /* Half-precision conversions for Advanced SIMD -- neon. */
15778 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
15780 as_bad (_("operand size must match register width"));
15785 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
15787 as_bad (_("operand size must match register width"));
15792 inst
.instruction
= 0x3b60600;
15794 inst
.instruction
= 0x3b60700;
15796 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15797 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15798 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15799 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15800 neon_dp_fixup (&inst
);
15804 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15805 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15806 do_vfp_nsyn_cvt (rs
, flavour
);
15808 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15813 do_neon_cvtr (void)
15815 do_neon_cvt_1 (neon_cvt_mode_x
);
15821 do_neon_cvt_1 (neon_cvt_mode_z
);
15825 do_neon_cvta (void)
15827 do_neon_cvt_1 (neon_cvt_mode_a
);
15831 do_neon_cvtn (void)
15833 do_neon_cvt_1 (neon_cvt_mode_n
);
15837 do_neon_cvtp (void)
15839 do_neon_cvt_1 (neon_cvt_mode_p
);
15843 do_neon_cvtm (void)
15845 do_neon_cvt_1 (neon_cvt_mode_m
);
15849 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
15852 mark_feature_used (&fpu_vfp_ext_armv8
);
15854 encode_arm_vfp_reg (inst
.operands
[0].reg
,
15855 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
15856 encode_arm_vfp_reg (inst
.operands
[1].reg
,
15857 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
15858 inst
.instruction
|= to
? 0x10000 : 0;
15859 inst
.instruction
|= t
? 0x80 : 0;
15860 inst
.instruction
|= is_double
? 0x100 : 0;
15861 do_vfp_cond_or_thumb ();
15865 do_neon_cvttb_1 (bfd_boolean t
)
15867 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
15868 NS_DF
, NS_DH
, NS_NULL
);
15872 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
15875 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
15877 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
15880 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
15882 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
15884 /* The VCVTB and VCVTT instructions with D-register operands
15885 don't work for SP only targets. */
15886 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15890 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
15892 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
15894 /* The VCVTB and VCVTT instructions with D-register operands
15895 don't work for SP only targets. */
15896 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15900 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
15907 do_neon_cvtb (void)
15909 do_neon_cvttb_1 (FALSE
);
15914 do_neon_cvtt (void)
15916 do_neon_cvttb_1 (TRUE
);
15920 neon_move_immediate (void)
15922 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
15923 struct neon_type_el et
= neon_check_type (2, rs
,
15924 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15925 unsigned immlo
, immhi
= 0, immbits
;
15926 int op
, cmode
, float_p
;
15928 constraint (et
.type
== NT_invtype
,
15929 _("operand size must be specified for immediate VMOV"));
15931 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
15932 op
= (inst
.instruction
& (1 << 5)) != 0;
15934 immlo
= inst
.operands
[1].imm
;
15935 if (inst
.operands
[1].regisimm
)
15936 immhi
= inst
.operands
[1].reg
;
15938 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
15939 _("immediate has bits set outside the operand size"));
15941 float_p
= inst
.operands
[1].immisfloat
;
15943 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
15944 et
.size
, et
.type
)) == FAIL
)
15946 /* Invert relevant bits only. */
15947 neon_invert_size (&immlo
, &immhi
, et
.size
);
15948 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15949 with one or the other; those cases are caught by
15950 neon_cmode_for_move_imm. */
15952 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
15953 &op
, et
.size
, et
.type
)) == FAIL
)
15955 first_error (_("immediate out of range"));
15960 inst
.instruction
&= ~(1 << 5);
15961 inst
.instruction
|= op
<< 5;
15963 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15964 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15965 inst
.instruction
|= neon_quad (rs
) << 6;
15966 inst
.instruction
|= cmode
<< 8;
15968 neon_write_immbits (immbits
);
15974 if (inst
.operands
[1].isreg
)
15976 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15978 NEON_ENCODE (INTEGER
, inst
);
15979 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15980 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15981 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15982 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15983 inst
.instruction
|= neon_quad (rs
) << 6;
15987 NEON_ENCODE (IMMED
, inst
);
15988 neon_move_immediate ();
15991 neon_dp_fixup (&inst
);
15994 /* Encode instructions of form:
15996 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15997 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16000 neon_mixed_length (struct neon_type_el et
, unsigned size
)
16002 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16003 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16004 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16005 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16006 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16007 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16008 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
16009 inst
.instruction
|= neon_logbits (size
) << 20;
16011 neon_dp_fixup (&inst
);
16015 do_neon_dyadic_long (void)
16017 /* FIXME: Type checking for lengthening op. */
16018 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16019 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16020 neon_mixed_length (et
, et
.size
);
16024 do_neon_abal (void)
16026 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16027 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16028 neon_mixed_length (et
, et
.size
);
16032 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
16034 if (inst
.operands
[2].isscalar
)
16036 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
16037 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
16038 NEON_ENCODE (SCALAR
, inst
);
16039 neon_mul_mac (et
, et
.type
== NT_unsigned
);
16043 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16044 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
16045 NEON_ENCODE (INTEGER
, inst
);
16046 neon_mixed_length (et
, et
.size
);
16051 do_neon_mac_maybe_scalar_long (void)
16053 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
16057 do_neon_dyadic_wide (void)
16059 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
16060 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16061 neon_mixed_length (et
, et
.size
);
16065 do_neon_dyadic_narrow (void)
16067 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16068 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
16069 /* Operand sign is unimportant, and the U bit is part of the opcode,
16070 so force the operand type to integer. */
16071 et
.type
= NT_integer
;
16072 neon_mixed_length (et
, et
.size
/ 2);
16076 do_neon_mul_sat_scalar_long (void)
16078 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
16082 do_neon_vmull (void)
16084 if (inst
.operands
[2].isscalar
)
16085 do_neon_mac_maybe_scalar_long ();
16088 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16089 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
16091 if (et
.type
== NT_poly
)
16092 NEON_ENCODE (POLY
, inst
);
16094 NEON_ENCODE (INTEGER
, inst
);
16096 /* For polynomial encoding the U bit must be zero, and the size must
16097 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16098 obviously, as 0b10). */
16101 /* Check we're on the correct architecture. */
16102 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
16104 _("Instruction form not available on this architecture.");
16109 neon_mixed_length (et
, et
.size
);
16116 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
16117 struct neon_type_el et
= neon_check_type (3, rs
,
16118 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16119 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
16121 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
16122 _("shift out of range"));
16123 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16124 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16125 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16126 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16127 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16128 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16129 inst
.instruction
|= neon_quad (rs
) << 6;
16130 inst
.instruction
|= imm
<< 8;
16132 neon_dp_fixup (&inst
);
16138 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16139 struct neon_type_el et
= neon_check_type (2, rs
,
16140 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16141 unsigned op
= (inst
.instruction
>> 7) & 3;
16142 /* N (width of reversed regions) is encoded as part of the bitmask. We
16143 extract it here to check the elements to be reversed are smaller.
16144 Otherwise we'd get a reserved instruction. */
16145 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
16146 gas_assert (elsize
!= 0);
16147 constraint (et
.size
>= elsize
,
16148 _("elements must be smaller than reversal region"));
16149 neon_two_same (neon_quad (rs
), 1, et
.size
);
16155 if (inst
.operands
[1].isscalar
)
16157 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
16158 struct neon_type_el et
= neon_check_type (2, rs
,
16159 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16160 unsigned sizebits
= et
.size
>> 3;
16161 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16162 int logsize
= neon_logbits (et
.size
);
16163 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
16165 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
16168 NEON_ENCODE (SCALAR
, inst
);
16169 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16170 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16171 inst
.instruction
|= LOW4 (dm
);
16172 inst
.instruction
|= HI1 (dm
) << 5;
16173 inst
.instruction
|= neon_quad (rs
) << 6;
16174 inst
.instruction
|= x
<< 17;
16175 inst
.instruction
|= sizebits
<< 16;
16177 neon_dp_fixup (&inst
);
16181 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
16182 struct neon_type_el et
= neon_check_type (2, rs
,
16183 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16184 /* Duplicate ARM register to lanes of vector. */
16185 NEON_ENCODE (ARMREG
, inst
);
16188 case 8: inst
.instruction
|= 0x400000; break;
16189 case 16: inst
.instruction
|= 0x000020; break;
16190 case 32: inst
.instruction
|= 0x000000; break;
16193 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16194 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
16195 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
16196 inst
.instruction
|= neon_quad (rs
) << 21;
16197 /* The encoding for this instruction is identical for the ARM and Thumb
16198 variants, except for the condition field. */
16199 do_vfp_cond_or_thumb ();
16203 /* VMOV has particularly many variations. It can be one of:
16204 0. VMOV<c><q> <Qd>, <Qm>
16205 1. VMOV<c><q> <Dd>, <Dm>
16206 (Register operations, which are VORR with Rm = Rn.)
16207 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16208 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16210 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16211 (ARM register to scalar.)
16212 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16213 (Two ARM registers to vector.)
16214 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16215 (Scalar to ARM register.)
16216 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16217 (Vector to two ARM registers.)
16218 8. VMOV.F32 <Sd>, <Sm>
16219 9. VMOV.F64 <Dd>, <Dm>
16220 (VFP register moves.)
16221 10. VMOV.F32 <Sd>, #imm
16222 11. VMOV.F64 <Dd>, #imm
16223 (VFP float immediate load.)
16224 12. VMOV <Rd>, <Sm>
16225 (VFP single to ARM reg.)
16226 13. VMOV <Sd>, <Rm>
16227 (ARM reg to VFP single.)
16228 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16229 (Two ARM regs to two VFP singles.)
16230 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16231 (Two VFP singles to two ARM regs.)
16233 These cases can be disambiguated using neon_select_shape, except cases 1/9
16234 and 3/11 which depend on the operand type too.
16236 All the encoded bits are hardcoded by this function.
16238 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16239 Cases 5, 7 may be used with VFPv2 and above.
16241 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16242 can specify a type where it doesn't make sense to, and is ignored). */
16247 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
16248 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
16249 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
16250 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
16251 struct neon_type_el et
;
16252 const char *ldconst
= 0;
16256 case NS_DD
: /* case 1/9. */
16257 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16258 /* It is not an error here if no type is given. */
16260 if (et
.type
== NT_float
&& et
.size
== 64)
16262 do_vfp_nsyn_opcode ("fcpyd");
16265 /* fall through. */
16267 case NS_QQ
: /* case 0/1. */
16269 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16271 /* The architecture manual I have doesn't explicitly state which
16272 value the U bit should have for register->register moves, but
16273 the equivalent VORR instruction has U = 0, so do that. */
16274 inst
.instruction
= 0x0200110;
16275 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16276 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16277 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16278 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16279 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16280 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16281 inst
.instruction
|= neon_quad (rs
) << 6;
16283 neon_dp_fixup (&inst
);
16287 case NS_DI
: /* case 3/11. */
16288 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16290 if (et
.type
== NT_float
&& et
.size
== 64)
16292 /* case 11 (fconstd). */
16293 ldconst
= "fconstd";
16294 goto encode_fconstd
;
16296 /* fall through. */
16298 case NS_QI
: /* case 2/3. */
16299 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16301 inst
.instruction
= 0x0800010;
16302 neon_move_immediate ();
16303 neon_dp_fixup (&inst
);
16306 case NS_SR
: /* case 4. */
16308 unsigned bcdebits
= 0;
16310 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
16311 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
16313 /* .<size> is optional here, defaulting to .32. */
16314 if (inst
.vectype
.elems
== 0
16315 && inst
.operands
[0].vectype
.type
== NT_invtype
16316 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16318 inst
.vectype
.el
[0].type
= NT_untyped
;
16319 inst
.vectype
.el
[0].size
= 32;
16320 inst
.vectype
.elems
= 1;
16323 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16324 logsize
= neon_logbits (et
.size
);
16326 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16328 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16329 && et
.size
!= 32, _(BAD_FPU
));
16330 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16331 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16335 case 8: bcdebits
= 0x8; break;
16336 case 16: bcdebits
= 0x1; break;
16337 case 32: bcdebits
= 0x0; break;
16341 bcdebits
|= x
<< logsize
;
16343 inst
.instruction
= 0xe000b10;
16344 do_vfp_cond_or_thumb ();
16345 inst
.instruction
|= LOW4 (dn
) << 16;
16346 inst
.instruction
|= HI1 (dn
) << 7;
16347 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16348 inst
.instruction
|= (bcdebits
& 3) << 5;
16349 inst
.instruction
|= (bcdebits
>> 2) << 21;
16353 case NS_DRR
: /* case 5 (fmdrr). */
16354 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16357 inst
.instruction
= 0xc400b10;
16358 do_vfp_cond_or_thumb ();
16359 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
16360 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
16361 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16362 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
16365 case NS_RS
: /* case 6. */
16368 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16369 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
16370 unsigned abcdebits
= 0;
16372 /* .<dt> is optional here, defaulting to .32. */
16373 if (inst
.vectype
.elems
== 0
16374 && inst
.operands
[0].vectype
.type
== NT_invtype
16375 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16377 inst
.vectype
.el
[0].type
= NT_untyped
;
16378 inst
.vectype
.el
[0].size
= 32;
16379 inst
.vectype
.elems
= 1;
16382 et
= neon_check_type (2, NS_NULL
,
16383 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
16384 logsize
= neon_logbits (et
.size
);
16386 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16388 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16389 && et
.size
!= 32, _(BAD_FPU
));
16390 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16391 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16395 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
16396 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
16397 case 32: abcdebits
= 0x00; break;
16401 abcdebits
|= x
<< logsize
;
16402 inst
.instruction
= 0xe100b10;
16403 do_vfp_cond_or_thumb ();
16404 inst
.instruction
|= LOW4 (dn
) << 16;
16405 inst
.instruction
|= HI1 (dn
) << 7;
16406 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16407 inst
.instruction
|= (abcdebits
& 3) << 5;
16408 inst
.instruction
|= (abcdebits
>> 2) << 21;
16412 case NS_RRD
: /* case 7 (fmrrd). */
16413 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16416 inst
.instruction
= 0xc500b10;
16417 do_vfp_cond_or_thumb ();
16418 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16419 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16420 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16421 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16424 case NS_FF
: /* case 8 (fcpys). */
16425 do_vfp_nsyn_opcode ("fcpys");
16429 case NS_FI
: /* case 10 (fconsts). */
16430 ldconst
= "fconsts";
16432 if (is_quarter_float (inst
.operands
[1].imm
))
16434 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
16435 do_vfp_nsyn_opcode (ldconst
);
16437 /* ARMv8.2 fp16 vmov.f16 instruction. */
16439 do_scalar_fp16_v82_encode ();
16442 first_error (_("immediate out of range"));
16446 case NS_RF
: /* case 12 (fmrs). */
16447 do_vfp_nsyn_opcode ("fmrs");
16448 /* ARMv8.2 fp16 vmov.f16 instruction. */
16450 do_scalar_fp16_v82_encode ();
16454 case NS_FR
: /* case 13 (fmsr). */
16455 do_vfp_nsyn_opcode ("fmsr");
16456 /* ARMv8.2 fp16 vmov.f16 instruction. */
16458 do_scalar_fp16_v82_encode ();
16461 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16462 (one of which is a list), but we have parsed four. Do some fiddling to
16463 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16465 case NS_RRFF
: /* case 14 (fmrrs). */
16466 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
16467 _("VFP registers must be adjacent"));
16468 inst
.operands
[2].imm
= 2;
16469 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16470 do_vfp_nsyn_opcode ("fmrrs");
16473 case NS_FFRR
: /* case 15 (fmsrr). */
16474 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
16475 _("VFP registers must be adjacent"));
16476 inst
.operands
[1] = inst
.operands
[2];
16477 inst
.operands
[2] = inst
.operands
[3];
16478 inst
.operands
[0].imm
= 2;
16479 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16480 do_vfp_nsyn_opcode ("fmsrr");
16484 /* neon_select_shape has determined that the instruction
16485 shape is wrong and has already set the error message. */
16494 do_neon_rshift_round_imm (void)
16496 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16497 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
16498 int imm
= inst
.operands
[2].imm
;
16500 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16503 inst
.operands
[2].present
= 0;
16508 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16509 _("immediate out of range for shift"));
16510 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
16515 do_neon_movhf (void)
16517 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
16518 constraint (rs
!= NS_HH
, _("invalid suffix"));
16520 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16523 do_vfp_sp_monadic ();
16526 inst
.instruction
|= 0xf0000000;
16530 do_neon_movl (void)
16532 struct neon_type_el et
= neon_check_type (2, NS_QD
,
16533 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16534 unsigned sizebits
= et
.size
>> 3;
16535 inst
.instruction
|= sizebits
<< 19;
16536 neon_two_same (0, et
.type
== NT_unsigned
, -1);
16542 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16543 struct neon_type_el et
= neon_check_type (2, rs
,
16544 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16545 NEON_ENCODE (INTEGER
, inst
);
16546 neon_two_same (neon_quad (rs
), 1, et
.size
);
16550 do_neon_zip_uzp (void)
16552 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16553 struct neon_type_el et
= neon_check_type (2, rs
,
16554 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16555 if (rs
== NS_DD
&& et
.size
== 32)
16557 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16558 inst
.instruction
= N_MNEM_vtrn
;
16562 neon_two_same (neon_quad (rs
), 1, et
.size
);
16566 do_neon_sat_abs_neg (void)
16568 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16569 struct neon_type_el et
= neon_check_type (2, rs
,
16570 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16571 neon_two_same (neon_quad (rs
), 1, et
.size
);
16575 do_neon_pair_long (void)
16577 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16578 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
16579 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16580 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
16581 neon_two_same (neon_quad (rs
), 1, et
.size
);
16585 do_neon_recip_est (void)
16587 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16588 struct neon_type_el et
= neon_check_type (2, rs
,
16589 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
16590 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16591 neon_two_same (neon_quad (rs
), 1, et
.size
);
16597 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16598 struct neon_type_el et
= neon_check_type (2, rs
,
16599 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16600 neon_two_same (neon_quad (rs
), 1, et
.size
);
16606 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16607 struct neon_type_el et
= neon_check_type (2, rs
,
16608 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
16609 neon_two_same (neon_quad (rs
), 1, et
.size
);
16615 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16616 struct neon_type_el et
= neon_check_type (2, rs
,
16617 N_EQK
| N_INT
, N_8
| N_KEY
);
16618 neon_two_same (neon_quad (rs
), 1, et
.size
);
16624 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16625 neon_two_same (neon_quad (rs
), 1, -1);
16629 do_neon_tbl_tbx (void)
16631 unsigned listlenbits
;
16632 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
16634 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
16636 first_error (_("bad list length for table lookup"));
16640 listlenbits
= inst
.operands
[1].imm
- 1;
16641 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16642 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16643 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16644 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16645 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16646 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16647 inst
.instruction
|= listlenbits
<< 8;
16649 neon_dp_fixup (&inst
);
16653 do_neon_ldm_stm (void)
16655 /* P, U and L bits are part of bitmask. */
16656 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
16657 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
16659 if (inst
.operands
[1].issingle
)
16661 do_vfp_nsyn_ldm_stm (is_dbmode
);
16665 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
16666 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16668 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
16669 _("register list must contain at least 1 and at most 16 "
16672 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
16673 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
16674 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16675 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
16677 inst
.instruction
|= offsetbits
;
16679 do_vfp_cond_or_thumb ();
16683 do_neon_ldr_str (void)
16685 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
16687 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16688 And is UNPREDICTABLE in thumb mode. */
16690 && inst
.operands
[1].reg
== REG_PC
16691 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
16694 inst
.error
= _("Use of PC here is UNPREDICTABLE");
16695 else if (warn_on_deprecated
)
16696 as_tsktsk (_("Use of PC here is deprecated"));
16699 if (inst
.operands
[0].issingle
)
16702 do_vfp_nsyn_opcode ("flds");
16704 do_vfp_nsyn_opcode ("fsts");
16706 /* ARMv8.2 vldr.16/vstr.16 instruction. */
16707 if (inst
.vectype
.el
[0].size
== 16)
16708 do_scalar_fp16_v82_encode ();
16713 do_vfp_nsyn_opcode ("fldd");
16715 do_vfp_nsyn_opcode ("fstd");
16719 /* "interleave" version also handles non-interleaving register VLD1/VST1
16723 do_neon_ld_st_interleave (void)
16725 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
16726 N_8
| N_16
| N_32
| N_64
);
16727 unsigned alignbits
= 0;
16729 /* The bits in this table go:
16730 0: register stride of one (0) or two (1)
16731 1,2: register list length, minus one (1, 2, 3, 4).
16732 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16733 We use -1 for invalid entries. */
16734 const int typetable
[] =
16736 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16737 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16738 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16739 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16743 if (et
.type
== NT_invtype
)
16746 if (inst
.operands
[1].immisalign
)
16747 switch (inst
.operands
[1].imm
>> 8)
16749 case 64: alignbits
= 1; break;
16751 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
16752 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16753 goto bad_alignment
;
16757 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16758 goto bad_alignment
;
16763 first_error (_("bad alignment"));
16767 inst
.instruction
|= alignbits
<< 4;
16768 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16770 /* Bits [4:6] of the immediate in a list specifier encode register stride
16771 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16772 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16773 up the right value for "type" in a table based on this value and the given
16774 list style, then stick it back. */
16775 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
16776 | (((inst
.instruction
>> 8) & 3) << 3);
16778 typebits
= typetable
[idx
];
16780 constraint (typebits
== -1, _("bad list type for instruction"));
16781 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
16782 _("bad element type for instruction"));
16784 inst
.instruction
&= ~0xf00;
16785 inst
.instruction
|= typebits
<< 8;
16788 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16789 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16790 otherwise. The variable arguments are a list of pairs of legal (size, align)
16791 values, terminated with -1. */
16794 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
16797 int result
= FAIL
, thissize
, thisalign
;
16799 if (!inst
.operands
[1].immisalign
)
16805 va_start (ap
, do_alignment
);
16809 thissize
= va_arg (ap
, int);
16810 if (thissize
== -1)
16812 thisalign
= va_arg (ap
, int);
16814 if (size
== thissize
&& align
== thisalign
)
16817 while (result
!= SUCCESS
);
16821 if (result
== SUCCESS
)
16824 first_error (_("unsupported alignment for instruction"));
16830 do_neon_ld_st_lane (void)
16832 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16833 int align_good
, do_alignment
= 0;
16834 int logsize
= neon_logbits (et
.size
);
16835 int align
= inst
.operands
[1].imm
>> 8;
16836 int n
= (inst
.instruction
>> 8) & 3;
16837 int max_el
= 64 / et
.size
;
16839 if (et
.type
== NT_invtype
)
16842 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
16843 _("bad list length"));
16844 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
16845 _("scalar index out of range"));
16846 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
16848 _("stride of 2 unavailable when element size is 8"));
16852 case 0: /* VLD1 / VST1. */
16853 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
16855 if (align_good
== FAIL
)
16859 unsigned alignbits
= 0;
16862 case 16: alignbits
= 0x1; break;
16863 case 32: alignbits
= 0x3; break;
16866 inst
.instruction
|= alignbits
<< 4;
16870 case 1: /* VLD2 / VST2. */
16871 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
16872 16, 32, 32, 64, -1);
16873 if (align_good
== FAIL
)
16876 inst
.instruction
|= 1 << 4;
16879 case 2: /* VLD3 / VST3. */
16880 constraint (inst
.operands
[1].immisalign
,
16881 _("can't use alignment with this instruction"));
16884 case 3: /* VLD4 / VST4. */
16885 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
16886 16, 64, 32, 64, 32, 128, -1);
16887 if (align_good
== FAIL
)
16891 unsigned alignbits
= 0;
16894 case 8: alignbits
= 0x1; break;
16895 case 16: alignbits
= 0x1; break;
16896 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
16899 inst
.instruction
|= alignbits
<< 4;
16906 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16907 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16908 inst
.instruction
|= 1 << (4 + logsize
);
16910 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
16911 inst
.instruction
|= logsize
<< 10;
16914 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16917 do_neon_ld_dup (void)
16919 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16920 int align_good
, do_alignment
= 0;
16922 if (et
.type
== NT_invtype
)
16925 switch ((inst
.instruction
>> 8) & 3)
16927 case 0: /* VLD1. */
16928 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
16929 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16930 &do_alignment
, 16, 16, 32, 32, -1);
16931 if (align_good
== FAIL
)
16933 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
16936 case 2: inst
.instruction
|= 1 << 5; break;
16937 default: first_error (_("bad list length")); return;
16939 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16942 case 1: /* VLD2. */
16943 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16944 &do_alignment
, 8, 16, 16, 32, 32, 64,
16946 if (align_good
== FAIL
)
16948 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
16949 _("bad list length"));
16950 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16951 inst
.instruction
|= 1 << 5;
16952 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16955 case 2: /* VLD3. */
16956 constraint (inst
.operands
[1].immisalign
,
16957 _("can't use alignment with this instruction"));
16958 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
16959 _("bad list length"));
16960 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16961 inst
.instruction
|= 1 << 5;
16962 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16965 case 3: /* VLD4. */
16967 int align
= inst
.operands
[1].imm
>> 8;
16968 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
16969 16, 64, 32, 64, 32, 128, -1);
16970 if (align_good
== FAIL
)
16972 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
16973 _("bad list length"));
16974 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16975 inst
.instruction
|= 1 << 5;
16976 if (et
.size
== 32 && align
== 128)
16977 inst
.instruction
|= 0x3 << 6;
16979 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16986 inst
.instruction
|= do_alignment
<< 4;
16989 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16990 apart from bits [11:4]. */
16993 do_neon_ldx_stx (void)
16995 if (inst
.operands
[1].isreg
)
16996 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
16998 switch (NEON_LANE (inst
.operands
[0].imm
))
17000 case NEON_INTERLEAVE_LANES
:
17001 NEON_ENCODE (INTERLV
, inst
);
17002 do_neon_ld_st_interleave ();
17005 case NEON_ALL_LANES
:
17006 NEON_ENCODE (DUP
, inst
);
17007 if (inst
.instruction
== N_INV
)
17009 first_error ("only loads support such operands");
17016 NEON_ENCODE (LANE
, inst
);
17017 do_neon_ld_st_lane ();
17020 /* L bit comes from bit mask. */
17021 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17022 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17023 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17025 if (inst
.operands
[1].postind
)
17027 int postreg
= inst
.operands
[1].imm
& 0xf;
17028 constraint (!inst
.operands
[1].immisreg
,
17029 _("post-index must be a register"));
17030 constraint (postreg
== 0xd || postreg
== 0xf,
17031 _("bad register for post-index"));
17032 inst
.instruction
|= postreg
;
17036 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
17037 constraint (inst
.reloc
.exp
.X_op
!= O_constant
17038 || inst
.reloc
.exp
.X_add_number
!= 0,
17041 if (inst
.operands
[1].writeback
)
17043 inst
.instruction
|= 0xd;
17046 inst
.instruction
|= 0xf;
17050 inst
.instruction
|= 0xf9000000;
17052 inst
.instruction
|= 0xf4000000;
17057 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
17059 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17060 D register operands. */
17061 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17062 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17065 NEON_ENCODE (FPV8
, inst
);
17067 if (rs
== NS_FFF
|| rs
== NS_HHH
)
17069 do_vfp_sp_dyadic ();
17071 /* ARMv8.2 fp16 instruction. */
17073 do_scalar_fp16_v82_encode ();
17076 do_vfp_dp_rd_rn_rm ();
17079 inst
.instruction
|= 0x100;
17081 inst
.instruction
|= 0xf0000000;
17087 set_it_insn_type (OUTSIDE_IT_INSN
);
17089 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
17090 first_error (_("invalid instruction shape"));
17096 set_it_insn_type (OUTSIDE_IT_INSN
);
17098 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
17101 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17104 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
17108 do_vrint_1 (enum neon_cvt_mode mode
)
17110 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
17111 struct neon_type_el et
;
17116 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17117 D register operands. */
17118 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17119 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17122 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
17124 if (et
.type
!= NT_invtype
)
17126 /* VFP encodings. */
17127 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
17128 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
17129 set_it_insn_type (OUTSIDE_IT_INSN
);
17131 NEON_ENCODE (FPV8
, inst
);
17132 if (rs
== NS_FF
|| rs
== NS_HH
)
17133 do_vfp_sp_monadic ();
17135 do_vfp_dp_rd_rm ();
17139 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
17140 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
17141 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
17142 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
17143 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
17144 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
17145 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
17149 inst
.instruction
|= (rs
== NS_DD
) << 8;
17150 do_vfp_cond_or_thumb ();
17152 /* ARMv8.2 fp16 vrint instruction. */
17154 do_scalar_fp16_v82_encode ();
17158 /* Neon encodings (or something broken...). */
17160 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
17162 if (et
.type
== NT_invtype
)
17165 set_it_insn_type (OUTSIDE_IT_INSN
);
17166 NEON_ENCODE (FLOAT
, inst
);
17168 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17171 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17172 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17173 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17174 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17175 inst
.instruction
|= neon_quad (rs
) << 6;
17176 /* Mask off the original size bits and reencode them. */
17177 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
17178 | neon_logbits (et
.size
) << 18);
17182 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
17183 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
17184 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
17185 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
17186 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
17187 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
17188 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
17193 inst
.instruction
|= 0xfc000000;
17195 inst
.instruction
|= 0xf0000000;
17202 do_vrint_1 (neon_cvt_mode_x
);
17208 do_vrint_1 (neon_cvt_mode_z
);
17214 do_vrint_1 (neon_cvt_mode_r
);
17220 do_vrint_1 (neon_cvt_mode_a
);
17226 do_vrint_1 (neon_cvt_mode_n
);
17232 do_vrint_1 (neon_cvt_mode_p
);
17238 do_vrint_1 (neon_cvt_mode_m
);
17241 /* Crypto v1 instructions. */
17243 do_crypto_2op_1 (unsigned elttype
, int op
)
17245 set_it_insn_type (OUTSIDE_IT_INSN
);
17247 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
17253 NEON_ENCODE (INTEGER
, inst
);
17254 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17255 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17256 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17257 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17259 inst
.instruction
|= op
<< 6;
17262 inst
.instruction
|= 0xfc000000;
17264 inst
.instruction
|= 0xf0000000;
17268 do_crypto_3op_1 (int u
, int op
)
17270 set_it_insn_type (OUTSIDE_IT_INSN
);
17272 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
17273 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
17278 NEON_ENCODE (INTEGER
, inst
);
17279 neon_three_same (1, u
, 8 << op
);
17285 do_crypto_2op_1 (N_8
, 0);
17291 do_crypto_2op_1 (N_8
, 1);
17297 do_crypto_2op_1 (N_8
, 2);
17303 do_crypto_2op_1 (N_8
, 3);
/* SHA1C: SHA1 hash update (choose).  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}
/* SHA1P: SHA1 hash update (parity).  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}
/* SHA1M: SHA1 hash update (majority).  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}
/* SHA1SU0: SHA1 schedule update 0.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}
/* SHA256H: SHA256 hash update part 1.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}
/* SHA256H2: SHA256 hash update part 2.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}
/* SHA256SU1: SHA256 schedule update 1.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
17351 do_crypto_2op_1 (N_32
, -1);
17357 do_crypto_2op_1 (N_32
, 0);
17361 do_sha256su0 (void)
17363 do_crypto_2op_1 (N_32
, 1);
/* Shared encoder for the CRC32 family.  POLY selects CRC32 vs CRC32C
   (polynomial bit), SZ selects the operand size (B/H/W).  The same
   encoder serves both ARM and Thumb encodings: the field positions for
   Rd, sz and poly differ by thumb_mode.  Warns (does not error) on the
   UNPREDICTABLE uses of r15, and of r13 in Thumb mode.  NOTE(review):
   damaged extraction — braces dropped; code left byte-identical.  */
17367 do_crc32_1 (unsigned int poly
, unsigned int sz
)
17369 unsigned int Rd
= inst
.operands
[0].reg
;
17370 unsigned int Rn
= inst
.operands
[1].reg
;
17371 unsigned int Rm
= inst
.operands
[2].reg
;
17373 set_it_insn_type (OUTSIDE_IT_INSN
);
/* Rd lives at bits 11:8 in Thumb, 15:12 in ARM.  */
17374 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
17375 inst
.instruction
|= LOW4 (Rn
) << 16;
17376 inst
.instruction
|= LOW4 (Rm
);
/* Size field: bit 4 (Thumb) vs bit 21 (ARM).  */
17377 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
/* Polynomial-select (CRC32 vs CRC32C): bit 20 (Thumb) vs bit 9 (ARM).  */
17378 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
/* PC is UNPREDICTABLE in any position; SP additionally so in Thumb.  */
17380 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
17381 as_warn (UNPRED_REG ("r15"));
17382 if (thumb_mode
&& (Rd
== REG_SP
|| Rn
== REG_SP
|| Rm
== REG_SP
))
17383 as_warn (UNPRED_REG ("r13"));
/* NOTE(review): damaged extraction — the parameter list after FRAG, the
   switch on exp->X_op, and the closing brace were dropped; code left
   byte-identical.  */
17423 /* Overall per-instruction processing. */
17425 /* We need to be able to fix up arbitrary expressions in some statements.
17426 This is so that we can handle symbols that are an arbitrary distance from
17427 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17428 which returns part of an address in a form which will be valid for
17429 a data instruction. We do this by pushing the expression into a symbol
17430 in the expr_section, and creating a fix for that. */
17433 fix_new_arm (fragS
* frag
,
17447 /* Create an absolute valued symbol, so we have something to
17448 refer to in the object file. Unfortunately for us, gas's
17449 generic expression parsing will already have folded out
17450 any use of .set foo/.type foo %function that may have
17451 been used to set type information of the target location,
17452 that's being specified symbolically. We have to presume
17453 the user knows what they are doing. */
/* O_constant case: wrap the constant in a named *ABS* symbol so the fix
   has a symbol to reference in the object file.  */
17457 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
17459 symbol
= symbol_find_or_make (name
);
17460 S_SET_SEGMENT (symbol
, absolute_section
);
17461 symbol_set_frag (symbol
, &zero_address_frag
);
17462 S_SET_VALUE (symbol
, exp
->X_add_number
);
/* Rewrite the expression as "symbol + 0".  */
17463 exp
->X_op
= O_symbol
;
17464 exp
->X_add_symbol
= symbol
;
17465 exp
->X_add_number
= 0;
/* Simple symbol/constant expressions become an ordinary fix...  */
17471 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
17472 (enum bfd_reloc_code_real
) reloc
);
/* ...anything more complex is pushed into an expr_section symbol first.  */
17476 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
17477 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
17481 /* Mark whether the fix is to a THUMB instruction, or an ARM
/* Remember the instruction set of the fixed-up insn for later passes.  */
17483 new_fix
->tc_fix_data
= thumb_mode
;
/* Emit a variant frag for a Thumb instruction that may later be relaxed
   from 16 to 32 bits (frag_var with rs_machine_dependent), then write the
   initial 16-bit form.  NOTE(review): damaged extraction — the switch's
   case labels (O_symbol/O_constant/default) and braces were dropped;
   "frg" in the original comment is an upstream typo for "frag".  */
17486 /* Create a frg for an instruction requiring relaxation. */
17488 output_relax_insn (void)
17494 /* The size of the instruction is unknown, so tie the debug info to the
17495 start of the instruction. */
17496 dwarf2_emit_insn (0);
/* Split the relocation expression into a symbol + offset that frag_var
   can carry through relaxation.  */
17498 switch (inst
.reloc
.exp
.X_op
)
17501 sym
= inst
.reloc
.exp
.X_add_symbol
;
17502 offset
= inst
.reloc
.exp
.X_add_number
;
17506 offset
= inst
.reloc
.exp
.X_add_number
;
/* Complex expression: fold it into a synthetic symbol.  */
17509 sym
= make_expr_symbol (&inst
.reloc
.exp
);
/* Reserve INSN_SIZE (worst case), currently filled with THUMB_SIZE.  */
17513 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
17514 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
17515 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
/* Write a 32-bit Thumb instruction as two little-or-big-endian 16-bit
   halfwords, high halfword first (Thumb-2 memory order).  NOTE(review):
   damaged extraction — "static void" and braces were dropped.  */
17518 /* Write a 32-bit thumb instruction to buf. */
17520 put_thumb32_insn (char * buf
, unsigned long insn
)
17522 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
17523 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
/* Final per-instruction output: report any encoding error, route relaxable
   insns to output_relax_insn, otherwise write the encoded bytes into the
   current frag (handling 32-bit Thumb, ARM pairs, and plain sizes), create
   a fix if a relocation is pending, and emit dwarf line info.
   NOTE(review): damaged extraction — early "return"s, braces and the
   reloc-type argument of fix_new_arm were dropped; code byte-identical.  */
17527 output_inst (const char * str
)
)
/* Encoding stage recorded an error: report it against the source line.  */
17533 as_bad ("%s -- `%s'", inst
.error
, str
);
/* inst.relax set: this insn needs a relaxable frag instead.  */
17538 output_relax_insn ();
17541 if (inst
.size
== 0)
17544 to
= frag_more (inst
.size
);
17545 /* PR 9814: Record the thumb mode into the current frag so that we know
17546 what type of NOP padding to use, if necessary. We override any previous
17547 setting so that if the mode has changed then the NOPS that we use will
17548 match the encoding of the last instruction in the frag. */
17549 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
/* 32-bit Thumb insn: two halfwords, high first.  */
17551 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
17553 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
17554 put_thumb32_insn (to
, inst
.instruction
);
/* Double-size ARM insn: same word written twice.  */
17556 else if (inst
.size
> INSN_SIZE
)
17558 gas_assert (inst
.size
== (2 * INSN_SIZE
));
17559 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
17560 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
17563 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
/* Pending relocation: attach a fix at this insn's offset in the frag.  */
17565 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
17566 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
17567 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
17570 dwarf2_emit_insn (inst
.size
);
/* Build and emit a Thumb IT instruction (base opcode 0xbf00) with the
   given condition (bits 7:4) and mask (bits 3:0).  When TO is non-NULL the
   bytes overwrite a previously emitted IT insn in place (used to update an
   open automatic IT block); otherwise fresh bytes are taken from the frag.
   NOTE(review): damaged extraction — the "if (to == NULL)" branch and
   return statement were dropped; code byte-identical.  */
17574 output_it_inst (int cond
, int mask
, char * to
)
17576 unsigned long instruction
= 0xbf00;
17579 instruction
|= mask
;
17580 instruction
|= cond
<< 4;
/* No target supplied: allocate two bytes and tie debug info to them.  */
17584 to
= frag_more (2);
17586 dwarf2_emit_insn (2);
17590 md_number_to_chars (to
, instruction
, 2);
/* Enumerators classifying how each mnemonic accepts a condition affix;
   consumed by opcode_lookup and md_assemble via asm_opcode.tag.
   NOTE(review): damaged extraction — the "enum opcode_tag {" header and
   closing "};" were dropped; enumerator text is byte-identical.  */
17595 /* Tag values used in struct asm_opcode's tag field. */
17598 OT_unconditional
, /* Instruction cannot be conditionalized.
17599 The ARM condition field is still 0xE. */
17600 OT_unconditionalF
, /* Instruction cannot be conditionalized
17601 and carries 0xF in its ARM condition field. */
17602 OT_csuffix
, /* Instruction takes a conditional suffix. */
17603 OT_csuffixF
, /* Some forms of the instruction take a conditional
17604 suffix, others place 0xF where the condition field
17606 OT_cinfix3
, /* Instruction takes a conditional infix,
17607 beginning at character index 3. (In
17608 unified mode, it becomes a suffix.) */
17609 OT_cinfix3_deprecated
, /* The same as OT_cinfix3. This is used for
17610 tsts, cmps, cmns, and teqs. */
17611 OT_cinfix3_legacy
, /* Legacy instruction takes a conditional infix at
17612 character index 3, even in unified mode. Used for
17613 legacy instructions where suffix and infix forms
17614 may be ambiguous. */
17615 OT_csuf_or_in3
, /* Instruction takes either a conditional
17616 suffix or an infix at character index 3. */
17617 OT_odd_infix_unc
, /* This is the unconditional variant of an
17618 instruction that takes a conditional infix
17619 at an unusual position. In unified mode,
17620 this variant will accept a suffix. */
17621 OT_odd_infix_0
/* Values greater than or equal to OT_odd_infix_0
17622 are the conditional variants of instructions that
17623 take conditional infixes in unusual positions.
17624 The infix appears at character index
17625 (tag - OT_odd_infix_0). These are not accepted
17626 in unified mode. */
/* NOTE(review): damaged extraction — braces, several returns, variable
   declarations (base/end/affix/save/offset) and the hash_find_n length
   arguments were dropped throughout; code left byte-identical.  The
   upstream algorithm comment below (steps 1-3, U, CE, CM) is authoritative
   for what this function does.  */
17629 /* Subroutine of md_assemble, responsible for looking up the primary
17630 opcode from the mnemonic the user wrote. STR points to the
17631 beginning of the mnemonic.
17633 This is not simply a hash table lookup, because of conditional
17634 variants. Most instructions have conditional variants, which are
17635 expressed with a _conditional affix_ to the mnemonic. If we were
17636 to encode each conditional variant as a literal string in the opcode
17637 table, it would have approximately 20,000 entries.
17639 Most mnemonics take this affix as a suffix, and in unified syntax,
17640 'most' is upgraded to 'all'. However, in the divided syntax, some
17641 instructions take the affix as an infix, notably the s-variants of
17642 the arithmetic instructions. Of those instructions, all but six
17643 have the infix appear after the third character of the mnemonic.
17645 Accordingly, the algorithm for looking up primary opcodes given
17648 1. Look up the identifier in the opcode table.
17649 If we find a match, go to step U.
17651 2. Look up the last two characters of the identifier in the
17652 conditions table. If we find a match, look up the first N-2
17653 characters of the identifier in the opcode table. If we
17654 find a match, go to step CE.
17656 3. Look up the fourth and fifth characters of the identifier in
17657 the conditions table. If we find a match, extract those
17658 characters from the identifier, and look up the remaining
17659 characters in the opcode table. If we find a match, go
17664 U. Examine the tag field of the opcode structure, in case this is
17665 one of the six instructions with its conditional infix in an
17666 unusual place. If it is, the tag tells us where to find the
17667 infix; look it up in the conditions table and set inst.cond
17668 accordingly. Otherwise, this is an unconditional instruction.
17669 Again set inst.cond accordingly. Return the opcode structure.
17671 CE. Examine the tag field to make sure this is an instruction that
17672 should receive a conditional suffix. If it is not, fail.
17673 Otherwise, set inst.cond from the suffix we already looked up,
17674 and return the opcode structure.
17676 CM. Examine the tag field to make sure this is an instruction that
17677 should receive a conditional infix after the third character.
17678 If it is not, fail. Otherwise, undo the edits to the current
17679 line of input and proceed as for case CE. */
17681 static const struct asm_opcode
*
17682 opcode_lookup (char **str
)
17686 const struct asm_opcode
*opcode
;
17687 const struct asm_cond
*cond
;
17690 /* Scan up to the end of the mnemonic, which must end in white space,
17691 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17692 for (base
= end
= *str
; *end
!= '\0'; end
++)
17693 if (*end
== ' ' || *end
== '.')
17699 /* Handle a possible width suffix and/or Neon type suffix. */
17704 /* The .w and .n suffixes are only valid if the unified syntax is in
17706 if (unified_syntax
&& end
[1] == 'w')
17708 else if (unified_syntax
&& end
[1] == 'n')
17713 inst
.vectype
.elems
= 0;
/* Advance the caller's pointer past the mnemonic (+ width suffix).  */
17715 *str
= end
+ offset
;
17717 if (end
[offset
] == '.')
17719 /* See if we have a Neon type suffix (possible in either unified or
17720 non-unified ARM syntax mode). */
17721 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
17724 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
17730 /* Look for unaffixed or special-case affixed mnemonic. */
17731 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
/* Step U: exact match found.  Tags below OT_odd_infix_0 are
   unconditional here.  */
17736 if (opcode
->tag
< OT_odd_infix_0
)
17738 inst
.cond
= COND_ALWAYS
;
17742 if (warn_on_deprecated
&& unified_syntax
)
17743 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
/* Odd-position infix: the tag encodes where the condition sits.  */
17744 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
17745 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17748 inst
.cond
= cond
->value
;
17752 /* Cannot have a conditional suffix on a mnemonic of less than two
17754 if (end
- base
< 3)
17757 /* Look for suffixed mnemonic. */
/* Step 2/CE: try last two characters as a condition suffix.  */
17759 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17760 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17762 if (opcode
&& cond
)
17765 switch (opcode
->tag
)
17767 case OT_cinfix3_legacy
:
17768 /* Ignore conditional suffixes matched on infix only mnemonics. */
17772 case OT_cinfix3_deprecated
:
17773 case OT_odd_infix_unc
:
17774 if (!unified_syntax
)
17776 /* else fall through */
17780 case OT_csuf_or_in3
:
17781 inst
.cond
= cond
->value
;
17784 case OT_unconditional
:
17785 case OT_unconditionalF
:
17787 inst
.cond
= cond
->value
;
17790 /* Delayed diagnostic. */
17791 inst
.error
= BAD_COND
;
17792 inst
.cond
= COND_ALWAYS
;
17801 /* Cannot have a usual-position infix on a mnemonic of less than
17802 six characters (five would be a suffix). */
17803 if (end
- base
< 6)
17806 /* Look for infixed mnemonic in the usual position. */
/* Step 3/CM: try characters 3-4 as an infixed condition.  */
17808 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
/* Temporarily remove the two infix characters, look up the remainder,
   then restore the buffer (step CM "undo the edits").  */
17812 memcpy (save
, affix
, 2);
17813 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
17814 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17816 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
17817 memcpy (affix
, save
, 2);
17820 && (opcode
->tag
== OT_cinfix3
17821 || opcode
->tag
== OT_cinfix3_deprecated
17822 || opcode
->tag
== OT_csuf_or_in3
17823 || opcode
->tag
== OT_cinfix3_legacy
))
17826 if (warn_on_deprecated
&& unified_syntax
17827 && (opcode
->tag
== OT_cinfix3
17828 || opcode
->tag
== OT_cinfix3_deprecated
))
17829 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17831 inst
.cond
= cond
->value
;
/* NOTE(review): damaged extraction — braces and the now_it.cc assignment
   were dropped; code byte-identical.  Opens a new implicit IT block with
   an initial mask of 0x18 (one slot used) and emits the IT insn, keeping
   a pointer to its bytes in now_it.insn so now_it_add_mask can rewrite it.  */
17838 /* This function generates an initial IT instruction, leaving its block
17839 virtually open for the new instructions. Eventually,
17840 the mask will be updated by now_it_add_mask () each time
17841 a new instruction needs to be included in the IT block.
17842 Finally, the block is closed with close_automatic_it_block ().
17843 The block closure can be requested either from md_assemble (),
17844 a tencode (), or due to a label hook. */
17847 new_automatic_it_block (int cond
)
17849 now_it
.state
= AUTOMATIC_IT_BLOCK
;
17850 now_it
.mask
= 0x18;
17852 now_it
.block_length
= 1;
17853 mapping_state (MAP_THUMB
);
/* Emit the IT insn and remember its location for in-place updates.  */
17854 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
17855 now_it
.warn_deprecated
= FALSE
;
17856 now_it
.insn_cond
= TRUE
;
/* Reset the implicit-IT tracking state: mask 0x10 marks a closed/empty
   block.  NOTE(review): damaged extraction — braces dropped.  */
17859 /* Close an automatic IT block.
17860 See comments in new_automatic_it_block (). */
17863 close_automatic_it_block (void)
17865 now_it
.mask
= 0x10;
17866 now_it
.block_length
= 0;
/* Extend the open automatic IT block by one instruction: fold COND's
   then/else bit into the mask at the slot for the current block length,
   set the trailing terminator bit, and rewrite the already-emitted IT
   insn in place via output_it_inst.  NOTE(review): damaged extraction —
   braces, the resulting_bit/terminator arguments of the two
   SET_BIT_VALUE calls, and "#undef CLEAR_BIT" were dropped.  */
17869 /* Update the mask of the current automatically-generated IT
17870 instruction. See comments in new_automatic_it_block (). */
17873 now_it_add_mask (int cond
)
17875 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17876 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17877 | ((bitvalue) << (nbit)))
17878 const int resulting_bit
= (cond
& 1);
17880 now_it
.mask
&= 0xf;
/* Set the then/else bit for the newest slot...  */
17881 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17883 (5 - now_it
.block_length
));
/* ...and the terminator bit one position below it.  */
17884 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17886 ((5 - now_it
.block_length
) - 1) );
/* Patch the emitted IT instruction with the updated mask.  */
17887 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
17890 #undef SET_BIT_VALUE
/* NOTE(review): damaged extraction — braces and the "else" between the
   two assignments were dropped; code byte-identical.  The long upstream
   comment below documents the whole IT-block FSM protocol.  */
17893 /* The IT blocks handling machinery is accessed through the these functions:
17894 it_fsm_pre_encode () from md_assemble ()
17895 set_it_insn_type () optional, from the tencode functions
17896 set_it_insn_type_last () ditto
17897 in_it_block () ditto
17898 it_fsm_post_encode () from md_assemble ()
17899 force_automatic_it_block_close () from label habdling functions
17902 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17903 initializing the IT insn type with a generic initial value depending
17904 on the inst.condition.
17905 2) During the tencode function, two things may happen:
17906 a) The tencode function overrides the IT insn type by
17907 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17908 b) The tencode function queries the IT block state by
17909 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17911 Both set_it_insn_type and in_it_block run the internal FSM state
17912 handling function (handle_it_state), because: a) setting the IT insn
17913 type may incur in an invalid state (exiting the function),
17914 and b) querying the state requires the FSM to be updated.
17915 Specifically we want to avoid creating an IT block for conditional
17916 branches, so it_fsm_pre_encode is actually a guess and we can't
17917 determine whether an IT block is required until the tencode () routine
17918 has decided what type of instruction this actually it.
17919 Because of this, if set_it_insn_type and in_it_block have to be used,
17920 set_it_insn_type has to be called first.
17922 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17923 determines the insn IT type depending on the inst.cond code.
17924 When a tencode () routine encodes an instruction that can be
17925 either outside an IT block, or, in the case of being inside, has to be
17926 the last one, set_it_insn_type_last () will determine the proper
17927 IT instruction type based on the inst.cond code. Otherwise,
17928 set_it_insn_type can be called for overriding that logic or
17929 for covering other cases.
17931 Calling handle_it_state () may not transition the IT block state to
17932 OUTSIDE_IT_BLOCK immediatelly, since the (current) state could be
17933 still queried. Instead, if the FSM determines that the state should
17934 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17935 after the tencode () function: that's what it_fsm_post_encode () does.
17937 Since in_it_block () calls the state handling function to get an
17938 updated state, an error may occur (due to invalid insns combination).
17939 In that case, inst.error is set.
17940 Therefore, inst.error has to be checked after the execution of
17941 the tencode () routine.
17943 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17944 any pending state change (if any) that didn't take place in
17945 handle_it_state () as explained above. */
17948 it_fsm_pre_encode (void)
/* Initial guess: a conditional insn is assumed to sit inside an IT block;
   the tencode routine may override this via set_it_insn_type ().  */
17950 if (inst
.cond
!= COND_ALWAYS
)
17951 inst
.it_insn_type
= INSIDE_IT_INSN
;
17953 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
17955 now_it
.state_handled
= 0;
/* Core IT-block finite state machine.  Dispatches on now_it.state
   (OUTSIDE_IT_BLOCK / AUTOMATIC_IT_BLOCK / MANUAL_IT_BLOCK) and
   inst.it_insn_type, opening/extending/closing automatic IT blocks and
   diagnosing misuse inside manual ones.  NOTE(review): damaged
   extraction — returns, break statements, braces, and several case
   labels (e.g. IT_INSN in OUTSIDE_IT_BLOCK, the %s of the warning
   string) were dropped; code byte-identical.  */
17958 /* IT state FSM handling function. */
17961 handle_it_state (void)
17963 now_it
.state_handled
= 1;
17964 now_it
.insn_cond
= FALSE
;
17966 switch (now_it
.state
)
17968 case OUTSIDE_IT_BLOCK
:
17969 switch (inst
.it_insn_type
)
17971 case OUTSIDE_IT_INSN
:
17974 case INSIDE_IT_INSN
:
17975 case INSIDE_IT_LAST_INSN
:
/* Conditional insn with no IT block open.  In ARM mode this may be
   silently allowed; in Thumb mode we auto-generate an IT insn when
   implicit-IT is enabled and the core has v6t2.  */
17976 if (thumb_mode
== 0)
17979 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
17980 as_tsktsk (_("Warning: conditional outside an IT block"\
17985 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
17986 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
17988 /* Automatically generate the IT instruction. */
17989 new_automatic_it_block (inst
.cond
);
17990 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
17991 close_automatic_it_block ();
17995 inst
.error
= BAD_OUT_IT
;
18001 case IF_INSIDE_IT_LAST_INSN
:
18002 case NEUTRAL_IT_INSN
:
/* An explicit IT instruction opens a manual block.  */
18006 now_it
.state
= MANUAL_IT_BLOCK
;
18007 now_it
.block_length
= 0;
18012 case AUTOMATIC_IT_BLOCK
:
18013 /* Three things may happen now:
18014 a) We should increment current it block size;
18015 b) We should close current it block (closing insn or 4 insns);
18016 c) We should close current it block and start a new one (due
18017 to incompatible conditions or
18018 4 insns-length block reached). */
18020 switch (inst
.it_insn_type
)
18022 case OUTSIDE_IT_INSN
:
18023 /* The closure of the block shall happen immediatelly,
18024 so any in_it_block () call reports the block as closed. */
18025 force_automatic_it_block_close ();
18028 case INSIDE_IT_INSN
:
18029 case INSIDE_IT_LAST_INSN
:
18030 case IF_INSIDE_IT_LAST_INSN
:
18031 now_it
.block_length
++;
/* Block full (4 insns) or condition incompatible: close it, and
   start a fresh block unless this was meant to be the last insn.  */
18033 if (now_it
.block_length
> 4
18034 || !now_it_compatible (inst
.cond
))
18036 force_automatic_it_block_close ();
18037 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
18038 new_automatic_it_block (inst
.cond
);
18042 now_it
.insn_cond
= TRUE
;
18043 now_it_add_mask (inst
.cond
);
18046 if (now_it
.state
== AUTOMATIC_IT_BLOCK
18047 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
18048 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
18049 close_automatic_it_block ();
18052 case NEUTRAL_IT_INSN
:
18053 now_it
.block_length
++;
18054 now_it
.insn_cond
= TRUE
;
18056 if (now_it
.block_length
> 4)
18057 force_automatic_it_block_close ();
/* Neutral insns inherit the block's condition parity.  */
18059 now_it_add_mask (now_it
.cc
& 1);
18063 close_automatic_it_block ();
18064 now_it
.state
= MANUAL_IT_BLOCK
;
18069 case MANUAL_IT_BLOCK
:
18071 /* Check conditional suffixes. */
/* Expected condition of the current slot: block condition XOR the
   then/else bit at the top of the remaining mask.  */
18072 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
18075 now_it
.mask
&= 0x1f;
18076 is_last
= (now_it
.mask
== 0x10);
18077 now_it
.insn_cond
= TRUE
;
18079 switch (inst
.it_insn_type
)
18081 case OUTSIDE_IT_INSN
:
/* Unconditional-only insn found inside a manual IT block.  */
18082 inst
.error
= BAD_NOT_IT
;
18085 case INSIDE_IT_INSN
:
18086 if (cond
!= inst
.cond
)
18088 inst
.error
= BAD_IT_COND
;
18093 case INSIDE_IT_LAST_INSN
:
18094 case IF_INSIDE_IT_LAST_INSN
:
18095 if (cond
!= inst
.cond
)
18097 inst
.error
= BAD_IT_COND
;
/* Insn must be the last of the block (e.g. a branch) but isn't.  */
18102 inst
.error
= BAD_BRANCH
;
18107 case NEUTRAL_IT_INSN
:
18108 /* The BKPT instruction is unconditional even in an IT block. */
/* A nested IT instruction inside an IT block is an error.  */
18112 inst
.error
= BAD_IT_IT
;
/* Pattern/mask table of 16-bit Thumb encodings whose use inside an IT
   block is deprecated in ARMv8; scanned by it_fsm_post_encode until the
   zero-mask terminator.  NOTE(review): damaged extraction — the struct's
   braces, "ARMv8" tail of the comment, and the terminating {0,0,NULL}
   entry were dropped; code byte-identical.  */
18122 struct depr_insn_mask
18124 unsigned long pattern
;
18125 unsigned long mask
;
18126 const char* description
;
18129 /* List of 16-bit instruction patterns deprecated in an IT block in
18131 static const struct depr_insn_mask depr_it_insns
[] = {
18132 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
18133 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
18134 { 0xa000, 0xb800, N_("ADR") },
18135 { 0x4800, 0xf800, N_("Literal loads") },
18136 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
18137 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
18138 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
18139 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
18140 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
/* Commit pending IT-FSM state after a tencode routine ran: run the FSM
   if the tencode didn't, issue the ARMv8 IT-block deprecation warnings
   (32-bit insns, listed 16-bit classes, multi-insn blocks), and close the
   block when its mask says this was the last slot.  NOTE(review): damaged
   extraction — braces, "else"s, "p++", and the is_last check before the
   final state change were dropped; code byte-identical.  */
18145 it_fsm_post_encode (void)
18149 if (!now_it
.state_handled
)
18150 handle_it_state ();
/* Emit each deprecation warning at most once per IT block.  */
18152 if (now_it
.insn_cond
18153 && !now_it
.warn_deprecated
18154 && warn_on_deprecated
18155 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
18157 if (inst
.instruction
>= 0x10000)
18159 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18160 "deprecated in ARMv8"));
18161 now_it
.warn_deprecated
= TRUE
;
/* 16-bit insn: scan the deprecated-pattern table.  */
18165 const struct depr_insn_mask
*p
= depr_it_insns
;
18167 while (p
->mask
!= 0)
18169 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
18171 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18172 "of the following class are deprecated in ARMv8: "
18173 "%s"), p
->description
);
18174 now_it
.warn_deprecated
= TRUE
;
18182 if (now_it
.block_length
> 1)
18184 as_tsktsk (_("IT blocks containing more than one conditional "
18185 "instruction are deprecated in ARMv8"));
18186 now_it
.warn_deprecated
= TRUE
;
/* Mask value 0x10 means the block's final slot was just consumed.  */
18190 is_last
= (now_it
.mask
== 0x10);
18193 now_it
.state
= OUTSIDE_IT_BLOCK
;
/* Immediately close any open automatic IT block (called from label
   handling and from the FSM when an unconditional insn arrives).
   NOTE(review): damaged extraction — braces dropped.  */
18199 force_automatic_it_block_close (void)
18201 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
18203 close_automatic_it_block ();
18204 now_it
.state
= OUTSIDE_IT_BLOCK
;
/* NOTE(review): damaged extraction — this is the body of in_it_block()
   (its signature was dropped).  Runs the FSM if not yet handled for this
   insn, then reports whether we are currently inside an IT block.  */
18212 if (!now_it
.state_handled
)
18213 handle_it_state ();
18215 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
/* Predicate: does OPCODE have only a 32-bit (T32) Thumb encoding?
   Matches BLX/BL (do_t_blx/do_t_branch23), MSR/barrier-class insns, and
   wide-only ARMv8-M Baseline additions.  NOTE(review): damaged
   extraction — the "return TRUE/FALSE" lines and braces were dropped.  */
18218 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18219 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18220 here, hence the "known" in the function name. */
18223 known_t32_only_insn (const struct asm_opcode
*opcode
)
18225 /* Original Thumb-1 wide instruction. */
18226 if (opcode
->tencode
== do_t_blx
18227 || opcode
->tencode
== do_t_branch23
18228 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
18229 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
18232 /* Wide-only instruction added to ARMv8-M Baseline. */
18233 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
18234 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
18235 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
18236 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
/* Predicate: may the wide (32-bit) variant of OPCODE be used on ARCH?
   True for known T32-only insns, for B.W on ARMv8-M, and for everything
   once the architecture has v6t2.  NOTE(review): damaged extraction —
   the "return TRUE/FALSE" lines and braces were dropped.  */
18242 /* Whether wide instruction variant can be used if available for a valid OPCODE
18246 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
18248 if (known_t32_only_insn (opcode
))
18251 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18252 of variant T3 of B.W is checked in do_t_branch. */
18253 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
18254 && opcode
->tencode
== do_t_branch
)
18257 /* Wide instruction variants of all instructions with narrow *and* wide
18258 variants become available with ARMv6t2. Other opcodes are either
18259 narrow-only or wide-only and are thus available if OPCODE is valid. */
18260 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
)
)
18263 /* OPCODE with narrow only instruction variant or wide variant not
/* Top-level per-statement assembly entry point for GAS: look up the
   mnemonic, validate it against the selected CPU/mode, parse operands,
   run the Thumb (tencode) or ARM (aencode) encoder bracketed by the IT
   FSM, track which architecture features were actually used, and emit
   the bytes via output_inst.  NOTE(review): damaged extraction — early
   returns, braces, several size_req branches and the end of the final
   diagnostic were dropped; code byte-identical.  */
18269 md_assemble (char *str
)
)
18272 const struct asm_opcode
* opcode
;
18274 /* Align the previous label if needed. */
18275 if (last_label_seen
!= NULL
)
18277 symbol_set_frag (last_label_seen
, frag_now
);
18278 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
18279 S_SET_SEGMENT (last_label_seen
, now_seg
);
/* Fresh per-instruction state; no relocation until an operand asks.  */
18282 memset (&inst
, '\0', sizeof (inst
));
18283 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
18285 opcode
= opcode_lookup (&p
);
18288 /* It wasn't an instruction, but it might be a register alias of
18289 the form alias .req reg, or a Neon .dn/.qn directive. */
18290 if (! create_register_alias (str
, p
)
18291 && ! create_neon_reg_alias (str
, p
))
18292 as_bad (_("bad instruction `%s'"), str
);
18297 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
18298 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
18300 /* The value which unconditional instructions should have in place of the
18301 condition field. */
18302 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
/* Thumb-mode path.  */
18306 arm_feature_set variant
;
18308 variant
= cpu_variant
;
18309 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
18310 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
18311 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
18312 /* Check that this instruction is supported for this CPU. */
18313 if (!opcode
->tvariant
18314 || (thumb_mode
== 1
18315 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
18317 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
/* Divided syntax allows conditions only on branches in Thumb.  */
18320 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
18321 && opcode
->tencode
!= do_t_branch
)
18323 as_bad (_("Thumb does not support conditional execution"));
18327 /* Two things are addressed here:
18328 1) Implicit require narrow instructions on Thumb-1.
18329 This avoids relaxation accidentally introducing Thumb-2
18331 2) Reject wide instructions in non Thumb-2 cores.
18333 Only instructions with narrow and wide variants need to be handled
18334 but selecting all non wide-only instructions is easier. */
18335 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
18336 && !t32_insn_ok (variant
, opcode
))
18338 if (inst
.size_req
== 0)
18340 else if (inst
.size_req
== 4)
18342 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
18343 as_bad (_("selected processor does not support 32bit wide "
18344 "variant of instruction `%s'"), str
);
18346 as_bad (_("selected processor does not support `%s' in "
18347 "Thumb-2 mode"), str
);
18352 inst
.instruction
= opcode
->tvalue
;
18354 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
18356 /* Prepare the it_insn_type for those encodings that don't set
/* Run the Thumb encoder bracketed by the IT FSM.  */
18358 it_fsm_pre_encode ();
18360 opcode
->tencode ();
18362 it_fsm_post_encode ();
18365 if (!(inst
.error
|| inst
.relax
))
/* 0xe800..0xffff would be half of a 32-bit encoding — never a
   complete instruction value.  */
18367 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
18368 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
18369 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
18371 as_bad (_("cannot honor width suffix -- `%s'"), str
);
18376 /* Something has gone badly wrong if we try to relax a fixed size
18378 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
18380 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18381 *opcode
->tvariant
);
18382 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
18383 set those bits when Thumb-2 32-bit instructions are seen. The impact
18384 of relaxable instructions will be considered later after we finish all
18386 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
18387 variant
= arm_arch_none
;
18389 variant
= cpu_variant
;
18390 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
18391 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18394 check_neon_suffixes
;
18398 mapping_state (MAP_THUMB
);
/* ARM-mode path.  */
18401 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
18405 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
18406 is_bx
= (opcode
->aencode
== do_bx
);
18408 /* Check that this instruction is supported for this CPU. */
18409 if (!(is_bx
&& fix_v4bx
)
18410 && !(opcode
->avariant
&&
18411 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
18413 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
/* .w/.n width suffixes are a Thumb-only concept.  */
18418 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
18422 inst
.instruction
= opcode
->avalue
;
18423 if (opcode
->tag
== OT_unconditionalF
)
18424 inst
.instruction
|= 0xFU
<< 28;
18426 inst
.instruction
|= inst
.cond
<< 28;
18427 inst
.size
= INSN_SIZE
;
18428 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
18430 it_fsm_pre_encode ();
18431 opcode
->aencode ();
18432 it_fsm_post_encode ();
18434 /* Arm mode bx is marked as both v4T and v5 because it's still required
18435 on a hypothetical non-thumb v5 core. */
18437 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
18439 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
18440 *opcode
->avariant
);
18442 check_neon_suffixes
;
18446 mapping_state (MAP_ARM
);
/* Neither Thumb nor any ARM feature available: Thumb-only core.  */
18451 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
/* End-of-assembly sanity pass: warn about any section (or the file as a
   whole) that ends with an explicitly-opened IT block still open.
   NOTE(review): damaged extraction — the OBJ_ELF guard, braces, and the
   section-name argument of the first as_warn were dropped.  */
18459 check_it_blocks_finished (void)
18464 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
18465 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
18466 == MANUAL_IT_BLOCK
)
18468 as_warn (_("section '%s' finished with an open IT block."),
18472 if (now_it
.state
== MANUAL_IT_BLOCK
)
18473 as_warn (_("file finished with an open IT block."));
/* Per-line hook: forget the label tracked for alignment by md_assemble.  */
18477 /* Various frobbings of labels and their addresses. */
18480 arm_start_line_hook (void)
18482 last_label_seen
= NULL
;
/* Label hook: record SYM for later alignment, tag it with the current
   instruction set (Thumb/ARM) and interworking flag, close any open
   automatic IT block, optionally mark a pending .thumb_func label (but
   never a .L-local one — see the long comment), and tell dwarf2.
   NOTE(review): damaged extraction — braces, the #endif, and several
   lines of the example-code comment were dropped; code byte-identical.  */
18486 arm_frob_label (symbolS
* sym
)
18488 last_label_seen
= sym
;
18490 ARM_SET_THUMB (sym
, thumb_mode
);
18492 #if defined OBJ_COFF || defined OBJ_ELF
18493 ARM_SET_INTERWORK (sym
, support_interwork
);
/* A label terminates any automatically-opened IT block.  */
18496 force_automatic_it_block_close ();
18498 /* Note - do not allow local symbols (.Lxxx) to be labelled
18499 as Thumb functions. This is because these labels, whilst
18500 they exist inside Thumb code, are not the entry points for
18501 possible ARM->Thumb calls. Also, these labels can be used
18502 as part of a computed goto or switch statement. eg gcc
18503 can generate code that looks like this:
18505 ldr r2, [pc, .Laaa]
18515 The first instruction loads the address of the jump table.
18516 The second instruction converts a table index into a byte offset.
18517 The third instruction gets the jump address out of the table.
18518 The fourth instruction performs the jump.
18520 If the address stored at .Laaa is that of a symbol which has the
18521 Thumb_Func bit set, then the linker will arrange for this address
18522 to have the bottom bit set, which in turn would mean that the
18523 address computation performed by the third instruction would end
18524 up with the bottom bit set. Since the ARM is capable of unaligned
18525 word loads, the instruction would then load the incorrect address
18526 out of the jump table, and chaos would ensue. */
18527 if (label_is_thumb_function_name
18528 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
18529 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
18531 /* When the address of a Thumb function is taken the bottom
18532 bit of that address should be set. This will allow
18533 interworking between Arm and Thumb functions to work
18536 THUMB_SET_FUNC (sym
, 1);
/* .thumb_func applies only to the next label; consume the flag.  */
18538 label_is_thumb_function_name
= FALSE
;
18541 dwarf2_emit_label (sym
);
/* Detect the "$data:" style marker after a mapping-symbol character in
   Thumb mode; rewrites it in the input buffer and advances past it.
   NOTE(review): damaged extraction — braces and "return TRUE/FALSE"
   were dropped; code byte-identical.  */
18545 arm_data_in_code (void)
18547 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
18549 *input_line_pointer
= '/';
18550 input_line_pointer
+= 5;
18551 *input_line_pointer
= 0;
/* Canonicalize a symbol name: in Thumb mode, strip a trailing "/data"
   suffix (the marker installed by arm_data_in_code) in place.
   NOTE(review): damaged extraction — the "int len" declaration, braces
   and "return name" were dropped; code byte-identical.  */
18559 arm_canonicalize_symbol_name (char * name
)
18563 if (thumb_mode
&& (len
= strlen (name
)) > 5
18564 && streq (name
+ len
- 5, "/data"))
18565 *(name
+ len
- 5) = 0;
/* Helper macros used to build the default register-name table that
   follows: REGDEF makes one reg_entry, REGNUM/REGNUM2 stringify prefixed
   numbered names, REGSET/REGSETH/REGSET2 expand whole banks of 16, and
   SPLRBANK builds the banked LR/SP/SPSR triples.  NOTE(review): damaged
   extraction — no comments are inserted inside the backslash-continued
   macro bodies below, as that would change their expansion.  */
18570 /* Table of all register names defined by default. The user can
18571 define additional names with .req. Note that all register names
18572 should appear in both upper and lowercase variants. Some registers
18573 also have mixed-case names. */
18575 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18576 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
18577 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18578 #define REGSET(p,t) \
18579 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18580 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18581 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18582 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18583 #define REGSETH(p,t) \
18584 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18585 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18586 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18587 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18588 #define REGSET2(p,t) \
18589 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18590 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18591 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18592 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18593 #define SPLRBANK(base,bank,t) \
18594 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18595 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18596 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18597 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18598 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18599 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18601 static const struct reg_entry reg_names
[] =
18603 /* ARM integer registers. */
18604 REGSET(r
, RN
), REGSET(R
, RN
),
18606 /* ATPCS synonyms. */
18607 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
18608 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
18609 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
18611 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
18612 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
18613 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
18615 /* Well-known aliases. */
18616 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
18617 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
18619 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
18620 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
18622 /* Coprocessor numbers. */
18623 REGSET(p
, CP
), REGSET(P
, CP
),
18625 /* Coprocessor register numbers. The "cr" variants are for backward
18627 REGSET(c
, CN
), REGSET(C
, CN
),
18628 REGSET(cr
, CN
), REGSET(CR
, CN
),
18630 /* ARM banked registers. */
18631 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
18632 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
18633 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
18634 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
18635 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
18636 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
18637 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
18639 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
18640 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
18641 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
18642 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
18643 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
18644 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
18645 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
18646 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
18648 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
18649 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
18650 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
18651 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
18652 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
18653 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
18654 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
18655 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18656 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18658 /* FPA registers. */
18659 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
18660 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
18662 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
18663 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
18665 /* VFP SP registers. */
18666 REGSET(s
,VFS
), REGSET(S
,VFS
),
18667 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
18669 /* VFP DP Registers. */
18670 REGSET(d
,VFD
), REGSET(D
,VFD
),
18671 /* Extra Neon DP registers. */
18672 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
18674 /* Neon QP registers. */
18675 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
18677 /* VFP control registers. */
18678 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
18679 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
18680 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
18681 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
18682 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
18683 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
18685 /* Maverick DSP coprocessor registers. */
18686 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
18687 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
18689 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
18690 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
18691 REGDEF(dspsc
,0,DSPSC
),
18693 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
18694 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
18695 REGDEF(DSPSC
,0,DSPSC
),
18697 /* iWMMXt data registers - p0, c0-15. */
18698 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
18700 /* iWMMXt control registers - p1, c0-3. */
18701 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
18702 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
18703 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
18704 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
18706 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18707 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
18708 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
18709 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
18710 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
18712 /* XScale accumulator registers. */
18713 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
18719 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18720 within psr_required_here. */
18721 static const struct asm_psr psrs
[] =
18723 /* Backward compatibility notation. Note that "all" is no longer
18724 truly all possible PSR bits. */
18725 {"all", PSR_c
| PSR_f
},
18729 /* Individual flags. */
18735 /* Combinations of flags. */
18736 {"fs", PSR_f
| PSR_s
},
18737 {"fx", PSR_f
| PSR_x
},
18738 {"fc", PSR_f
| PSR_c
},
18739 {"sf", PSR_s
| PSR_f
},
18740 {"sx", PSR_s
| PSR_x
},
18741 {"sc", PSR_s
| PSR_c
},
18742 {"xf", PSR_x
| PSR_f
},
18743 {"xs", PSR_x
| PSR_s
},
18744 {"xc", PSR_x
| PSR_c
},
18745 {"cf", PSR_c
| PSR_f
},
18746 {"cs", PSR_c
| PSR_s
},
18747 {"cx", PSR_c
| PSR_x
},
18748 {"fsx", PSR_f
| PSR_s
| PSR_x
},
18749 {"fsc", PSR_f
| PSR_s
| PSR_c
},
18750 {"fxs", PSR_f
| PSR_x
| PSR_s
},
18751 {"fxc", PSR_f
| PSR_x
| PSR_c
},
18752 {"fcs", PSR_f
| PSR_c
| PSR_s
},
18753 {"fcx", PSR_f
| PSR_c
| PSR_x
},
18754 {"sfx", PSR_s
| PSR_f
| PSR_x
},
18755 {"sfc", PSR_s
| PSR_f
| PSR_c
},
18756 {"sxf", PSR_s
| PSR_x
| PSR_f
},
18757 {"sxc", PSR_s
| PSR_x
| PSR_c
},
18758 {"scf", PSR_s
| PSR_c
| PSR_f
},
18759 {"scx", PSR_s
| PSR_c
| PSR_x
},
18760 {"xfs", PSR_x
| PSR_f
| PSR_s
},
18761 {"xfc", PSR_x
| PSR_f
| PSR_c
},
18762 {"xsf", PSR_x
| PSR_s
| PSR_f
},
18763 {"xsc", PSR_x
| PSR_s
| PSR_c
},
18764 {"xcf", PSR_x
| PSR_c
| PSR_f
},
18765 {"xcs", PSR_x
| PSR_c
| PSR_s
},
18766 {"cfs", PSR_c
| PSR_f
| PSR_s
},
18767 {"cfx", PSR_c
| PSR_f
| PSR_x
},
18768 {"csf", PSR_c
| PSR_s
| PSR_f
},
18769 {"csx", PSR_c
| PSR_s
| PSR_x
},
18770 {"cxf", PSR_c
| PSR_x
| PSR_f
},
18771 {"cxs", PSR_c
| PSR_x
| PSR_s
},
18772 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
18773 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
18774 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
18775 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
18776 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
18777 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
18778 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
18779 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
18780 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
18781 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
18782 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
18783 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
18784 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
18785 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
18786 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
18787 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
18788 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
18789 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
18790 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
18791 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
18792 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
18793 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
18794 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
18795 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
18798 /* Table of V7M psr names. */
18799 static const struct asm_psr v7m_psrs
[] =
18801 {"apsr", 0x0 }, {"APSR", 0x0 },
18802 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
18803 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
18804 {"psr", 0x3 }, {"PSR", 0x3 },
18805 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
18806 {"ipsr", 0x5 }, {"IPSR", 0x5 },
18807 {"epsr", 0x6 }, {"EPSR", 0x6 },
18808 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
18809 {"msp", 0x8 }, {"MSP", 0x8 },
18810 {"psp", 0x9 }, {"PSP", 0x9 },
18811 {"msplim", 0xa }, {"MSPLIM", 0xa },
18812 {"psplim", 0xb }, {"PSPLIM", 0xb },
18813 {"primask", 0x10}, {"PRIMASK", 0x10},
18814 {"basepri", 0x11}, {"BASEPRI", 0x11},
18815 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
18816 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
18817 {"control", 0x14}, {"CONTROL", 0x14},
18818 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
18819 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
18820 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
18821 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
18822 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
18823 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
18824 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
18825 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
18826 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
18829 /* Table of all shift-in-operand names. */
18830 static const struct asm_shift_name shift_names
[] =
18832 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
18833 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
18834 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
18835 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
18836 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
18837 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
18840 /* Table of all explicit relocation names. */
18842 static struct reloc_entry reloc_names
[] =
18844 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
18845 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
18846 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
18847 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
18848 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
18849 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
18850 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
18851 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
18852 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
18853 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
18854 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
18855 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
18856 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
18857 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
18858 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
18859 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
18860 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
18861 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
}
18865 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
18866 static const struct asm_cond conds
[] =
18870 {"cs", 0x2}, {"hs", 0x2},
18871 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
18885 #define UL_BARRIER(L,U,CODE,FEAT) \
18886 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18887 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
18889 static struct asm_barrier_opt barrier_opt_names
[] =
18891 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
18892 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
18893 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
18894 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
18895 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
18896 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
18897 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
18898 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
18899 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
18900 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
18901 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
18902 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
18903 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
18904 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
18905 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
18906 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
18911 /* Table of ARM-format instructions. */
18913 /* Macros for gluing together operand strings. N.B. In all cases
18914 other than OPS0, the trailing OP_stop comes from default
18915 zero-initialization of the unspecified elements of the array. */
18916 #define OPS0() { OP_stop, }
18917 #define OPS1(a) { OP_##a, }
18918 #define OPS2(a,b) { OP_##a,OP_##b, }
18919 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
18920 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
18921 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
18922 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
18924 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
18925 This is useful when mixing operands for ARM and THUMB, i.e. using the
18926 MIX_ARM_THUMB_OPERANDS macro.
18927 In order to use these macros, prefix the number of operands with _
18929 #define OPS_1(a) { a, }
18930 #define OPS_2(a,b) { a,b, }
18931 #define OPS_3(a,b,c) { a,b,c, }
18932 #define OPS_4(a,b,c,d) { a,b,c,d, }
18933 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
18934 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18936 /* These macros abstract out the exact format of the mnemonic table and
18937 save some repeated characters. */
18939 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
18940 #define TxCE(mnem, op, top, nops, ops, ae, te) \
18941 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
18942 THUMB_VARIANT, do_##ae, do_##te }
18944 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
18945 a T_MNEM_xyz enumerator. */
18946 #define TCE(mnem, aop, top, nops, ops, ae, te) \
18947 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
18948 #define tCE(mnem, aop, top, nops, ops, ae, te) \
18949 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18951 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
18952 infix after the third character. */
18953 #define TxC3(mnem, op, top, nops, ops, ae, te) \
18954 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
18955 THUMB_VARIANT, do_##ae, do_##te }
18956 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
18957 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
18958 THUMB_VARIANT, do_##ae, do_##te }
18959 #define TC3(mnem, aop, top, nops, ops, ae, te) \
18960 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
18961 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
18962 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
18963 #define tC3(mnem, aop, top, nops, ops, ae, te) \
18964 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18965 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
18966 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18968 /* Mnemonic that cannot be conditionalized. The ARM condition-code
18969 field is still 0xE. Many of the Thumb variants can be executed
18970 conditionally, so this is checked separately. */
18971 #define TUE(mnem, op, top, nops, ops, ae, te) \
18972 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18973 THUMB_VARIANT, do_##ae, do_##te }
18975 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
18976 Used by mnemonics that have very minimal differences in the encoding for
18977 ARM and Thumb variants and can be handled in a common function. */
18978 #define TUEc(mnem, op, top, nops, ops, en) \
18979 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18980 THUMB_VARIANT, do_##en, do_##en }
18982 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
18983 condition code field. */
18984 #define TUF(mnem, op, top, nops, ops, ae, te) \
18985 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
18986 THUMB_VARIANT, do_##ae, do_##te }
18988 /* ARM-only variants of all the above. */
18989 #define CE(mnem, op, nops, ops, ae) \
18990 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18992 #define C3(mnem, op, nops, ops, ae) \
18993 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18995 /* Legacy mnemonics that always have conditional infix after the third
18997 #define CL(mnem, op, nops, ops, ae) \
18998 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
18999 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19001 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
19002 #define cCE(mnem, op, nops, ops, ae) \
19003 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19005 /* Legacy coprocessor instructions where conditional infix and conditional
19006 suffix are ambiguous. For consistency this includes all FPA instructions,
19007 not just the potentially ambiguous ones. */
19008 #define cCL(mnem, op, nops, ops, ae) \
19009 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19010 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19012 /* Coprocessor, takes either a suffix or a position-3 infix
19013 (for an FPA corner case). */
19014 #define C3E(mnem, op, nops, ops, ae) \
19015 { mnem, OPS##nops ops, OT_csuf_or_in3, \
19016 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19018 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
19019 { m1 #m2 m3, OPS##nops ops, \
19020 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
19021 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19023 #define CM(m1, m2, op, nops, ops, ae) \
19024 xCM_ (m1, , m2, op, nops, ops, ae), \
19025 xCM_ (m1, eq, m2, op, nops, ops, ae), \
19026 xCM_ (m1, ne, m2, op, nops, ops, ae), \
19027 xCM_ (m1, cs, m2, op, nops, ops, ae), \
19028 xCM_ (m1, hs, m2, op, nops, ops, ae), \
19029 xCM_ (m1, cc, m2, op, nops, ops, ae), \
19030 xCM_ (m1, ul, m2, op, nops, ops, ae), \
19031 xCM_ (m1, lo, m2, op, nops, ops, ae), \
19032 xCM_ (m1, mi, m2, op, nops, ops, ae), \
19033 xCM_ (m1, pl, m2, op, nops, ops, ae), \
19034 xCM_ (m1, vs, m2, op, nops, ops, ae), \
19035 xCM_ (m1, vc, m2, op, nops, ops, ae), \
19036 xCM_ (m1, hi, m2, op, nops, ops, ae), \
19037 xCM_ (m1, ls, m2, op, nops, ops, ae), \
19038 xCM_ (m1, ge, m2, op, nops, ops, ae), \
19039 xCM_ (m1, lt, m2, op, nops, ops, ae), \
19040 xCM_ (m1, gt, m2, op, nops, ops, ae), \
19041 xCM_ (m1, le, m2, op, nops, ops, ae), \
19042 xCM_ (m1, al, m2, op, nops, ops, ae)
19044 #define UE(mnem, op, nops, ops, ae) \
19045 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19047 #define UF(mnem, op, nops, ops, ae) \
19048 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19050 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
19051 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
19052 use the same encoding function for each. */
19053 #define NUF(mnem, op, nops, ops, enc) \
19054 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
19055 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19057 /* Neon data processing, version which indirects through neon_enc_tab for
19058 the various overloaded versions of opcodes. */
19059 #define nUF(mnem, op, nops, ops, enc) \
19060 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
19061 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19063 /* Neon insn with conditional suffix for the ARM version, non-overloaded
19065 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
19066 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
19067 THUMB_VARIANT, do_##enc, do_##enc }
19069 #define NCE(mnem, op, nops, ops, enc) \
19070 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19072 #define NCEF(mnem, op, nops, ops, enc) \
19073 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19075 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
19076 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
19077 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
19078 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19080 #define nCE(mnem, op, nops, ops, enc) \
19081 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19083 #define nCEF(mnem, op, nops, ops, enc) \
19084 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19088 static const struct asm_opcode insns
[] =
19090 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19091 #define THUMB_VARIANT & arm_ext_v4t
19092 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19093 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19094 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19095 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19096 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19097 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19098 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19099 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19100 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19101 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19102 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19103 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19104 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19105 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19106 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19107 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19109 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19110 for setting PSR flag bits. They are obsolete in V6 and do not
19111 have Thumb equivalents. */
19112 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19113 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19114 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
19115 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19116 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19117 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
19118 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19119 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19120 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
19122 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
19123 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
19124 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19125 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19127 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
19128 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19129 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
19131 OP_ADDRGLDR
),ldst
, t_ldst
),
19132 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19134 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19135 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19136 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19137 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19138 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19139 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19141 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19142 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19143 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
19144 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
19147 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
19148 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
19149 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
19150 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
19152 /* Thumb-compatibility pseudo ops. */
19153 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19154 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19155 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19156 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19157 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19158 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19159 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19160 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19161 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
19162 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
19163 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
19164 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
19166 /* These may simplify to neg. */
19167 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19168 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19170 #undef THUMB_VARIANT
19171 #define THUMB_VARIANT & arm_ext_v6
19173 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
19175 /* V1 instructions with no Thumb analogue prior to V6T2. */
19176 #undef THUMB_VARIANT
19177 #define THUMB_VARIANT & arm_ext_v6t2
19179 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19180 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19181 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
19183 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19184 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19185 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
19186 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19188 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19189 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19191 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19192 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19194 /* V1 instructions with no Thumb analogue at all. */
19195 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
19196 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
19198 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19199 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19200 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19201 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19202 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19203 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19204 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19205 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19208 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19209 #undef THUMB_VARIANT
19210 #define THUMB_VARIANT & arm_ext_v4t
19212 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19213 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19215 #undef THUMB_VARIANT
19216 #define THUMB_VARIANT & arm_ext_v6t2
19218 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19219 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
19221 /* Generic coprocessor instructions. */
19222 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19223 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19224 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19225 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19226 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19227 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19228 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19231 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19233 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19234 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19237 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19238 #undef THUMB_VARIANT
19239 #define THUMB_VARIANT & arm_ext_msr
19241 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
19242 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
19245 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19246 #undef THUMB_VARIANT
19247 #define THUMB_VARIANT & arm_ext_v6t2
19249 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19250 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19251 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19252 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19253 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19254 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19255 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19256 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19259 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19260 #undef THUMB_VARIANT
19261 #define THUMB_VARIANT & arm_ext_v4t
19263 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19264 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19265 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19266 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19267 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19268 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19271 #define ARM_VARIANT & arm_ext_v4t_5
19273 /* ARM Architecture 4T. */
19274 /* Note: bx (and blx) are required on V5, even if the processor does
19275 not support Thumb. */
19276 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
19279 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19280 #undef THUMB_VARIANT
19281 #define THUMB_VARIANT & arm_ext_v5t
19283 /* Note: blx has 2 variants; the .value coded here is for
19284 BLX(2). Only this variant has conditional execution. */
19285 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
19286 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
19288 #undef THUMB_VARIANT
19289 #define THUMB_VARIANT & arm_ext_v6t2
19291 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
19292 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19293 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19294 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19295 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19296 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19297 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19298 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19301 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19302 #undef THUMB_VARIANT
19303 #define THUMB_VARIANT & arm_ext_v5exp
19305 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19306 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19307 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19308 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19310 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19311 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19313 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19314 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19315 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19316 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19318 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19319 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19320 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19321 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19323 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19324 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19326 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19327 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19328 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19329 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19332 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19333 #undef THUMB_VARIANT
19334 #define THUMB_VARIANT & arm_ext_v6t2
19336 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
19337 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
19339 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
19340 ADDRGLDRS
), ldrd
, t_ldstd
),
19342 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19343 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19346 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19348 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
19351 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19352 #undef THUMB_VARIANT
19353 #define THUMB_VARIANT & arm_ext_v6
19355 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19356 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19357 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19358 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19359 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19360 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19361 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19362 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19363 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19364 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
19366 #undef THUMB_VARIANT
19367 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19369 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
19370 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19372 #undef THUMB_VARIANT
19373 #define THUMB_VARIANT & arm_ext_v6t2
19375 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19376 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19378 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
19379 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
19381 /* ARM V6 not included in V7M. */
19382 #undef THUMB_VARIANT
19383 #define THUMB_VARIANT & arm_ext_v6_notm
19384 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19385 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19386 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
19387 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
19388 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19389 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19390 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
19391 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19392 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
19393 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19394 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19395 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19396 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19397 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19398 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
19399 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
19400 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19401 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19402 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
19404 /* ARM V6 not included in V7M (eg. integer SIMD). */
19405 #undef THUMB_VARIANT
19406 #define THUMB_VARIANT & arm_ext_v6_dsp
19407 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
19408 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
19409 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19410 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19411 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19412 /* Old name for QASX. */
19413 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19414 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19415 /* Old name for QSAX. */
19416 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19417 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19418 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19419 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19420 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19421 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19422 /* Old name for SASX. */
19423 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19424 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19425 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19426 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19427 /* Old name for SHASX. */
19428 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19429 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19430 /* Old name for SHSAX. */
19431 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19432 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19433 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19434 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19435 /* Old name for SSAX. */
19436 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19437 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19438 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19439 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19440 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19441 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19442 /* Old name for UASX. */
19443 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19444 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19445 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19446 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19447 /* Old name for UHASX. */
19448 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19449 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19450 /* Old name for UHSAX. */
19451 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19452 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19453 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19454 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19455 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19456 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19457 /* Old name for UQASX. */
19458 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19459 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19460 /* Old name for UQSAX. */
19461 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19462 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19463 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19464 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19465 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19466 /* Old name for USAX. */
19467 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19468 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19469 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19470 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19471 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19472 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19473 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19474 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19475 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19476 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19477 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19478 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19479 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19480 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19481 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19482 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19483 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19484 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19485 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19486 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19487 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19488 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19489 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19490 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19491 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19492 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19493 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19494 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19495 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19496 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
19497 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
19498 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19499 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19500 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
19503 #define ARM_VARIANT & arm_ext_v6k
19504 #undef THUMB_VARIANT
19505 #define THUMB_VARIANT & arm_ext_v6k
19507 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
19508 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
19509 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
19510 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
19512 #undef THUMB_VARIANT
19513 #define THUMB_VARIANT & arm_ext_v6_notm
19514 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
19516 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
19517 RRnpcb
), strexd
, t_strexd
),
19519 #undef THUMB_VARIANT
19520 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19521 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
19523 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
19525 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19527 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19529 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
19532 #define ARM_VARIANT & arm_ext_sec
19533 #undef THUMB_VARIANT
19534 #define THUMB_VARIANT & arm_ext_sec
19536 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
19539 #define ARM_VARIANT & arm_ext_virt
19540 #undef THUMB_VARIANT
19541 #define THUMB_VARIANT & arm_ext_virt
19543 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
19544 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
19547 #define ARM_VARIANT & arm_ext_pan
19548 #undef THUMB_VARIANT
19549 #define THUMB_VARIANT & arm_ext_pan
19551 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
19554 #define ARM_VARIANT & arm_ext_v6t2
19555 #undef THUMB_VARIANT
19556 #define THUMB_VARIANT & arm_ext_v6t2
19558 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
19559 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
19560 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19561 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19563 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19564 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
19566 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19567 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19568 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19569 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19571 #undef THUMB_VARIANT
19572 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19573 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19574 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19576 /* Thumb-only instructions. */
19578 #define ARM_VARIANT NULL
19579 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
19580 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
19582 /* ARM does not really have an IT instruction, so always allow it.
19583 The opcode is copied from Thumb in order to allow warnings in
19584 -mimplicit-it=[never | arm] modes. */
19586 #define ARM_VARIANT & arm_ext_v1
19587 #undef THUMB_VARIANT
19588 #define THUMB_VARIANT & arm_ext_v6t2
19590 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
19591 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
19592 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
19593 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
19594 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
19595 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
19596 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
19597 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
19598 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
19599 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
19600 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
19601 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
19602 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
19603 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
19604 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
19605 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19606 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19607 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19609 /* Thumb2 only instructions. */
19611 #define ARM_VARIANT NULL
19613 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19614 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19615 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19616 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19617 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
19618 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
19620 /* Hardware division instructions. */
19622 #define ARM_VARIANT & arm_ext_adiv
19623 #undef THUMB_VARIANT
19624 #define THUMB_VARIANT & arm_ext_div
19626 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19627 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19629 /* ARM V6M/V7 instructions. */
19631 #define ARM_VARIANT & arm_ext_barrier
19632 #undef THUMB_VARIANT
19633 #define THUMB_VARIANT & arm_ext_barrier
19635 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
19636 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
19637 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
19639 /* ARM V7 instructions. */
19641 #define ARM_VARIANT & arm_ext_v7
19642 #undef THUMB_VARIANT
19643 #define THUMB_VARIANT & arm_ext_v7
19645 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
19646 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
19649 #define ARM_VARIANT & arm_ext_mp
19650 #undef THUMB_VARIANT
19651 #define THUMB_VARIANT & arm_ext_mp
19653 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
19655 /* AArchv8 instructions. */
19657 #define ARM_VARIANT & arm_ext_v8
19659 /* Instructions shared between armv8-a and armv8-m. */
19660 #undef THUMB_VARIANT
19661 #define THUMB_VARIANT & arm_ext_atomics
19663 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19664 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19665 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19666 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19667 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19668 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19669 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19670 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
19671 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19672 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19674 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19676 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19678 #undef THUMB_VARIANT
19679 #define THUMB_VARIANT & arm_ext_v8
19681 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
19682 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
19683 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
19685 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
19687 /* ARMv8 T32 only. */
19689 #define ARM_VARIANT NULL
19690 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
19691 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
19692 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
19694 /* FP for ARMv8. */
19696 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19697 #undef THUMB_VARIANT
19698 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19700 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19701 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19702 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19703 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19704 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19705 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19706 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
19707 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
19708 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
19709 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
19710 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
19711 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
19712 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
19713 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
19714 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
19715 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
19716 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
19718 /* Crypto v1 extensions. */
19720 #define ARM_VARIANT & fpu_crypto_ext_armv8
19721 #undef THUMB_VARIANT
19722 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19724 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
19725 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
19726 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
19727 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
19728 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
19729 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
19730 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
19731 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
19732 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
19733 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
19734 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
19735 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
19736 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
19737 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
19740 #define ARM_VARIANT & crc_ext_armv8
19741 #undef THUMB_VARIANT
19742 #define THUMB_VARIANT & crc_ext_armv8
19743 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
19744 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
19745 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
19746 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
19747 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
19748 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
19750 /* ARMv8.2 RAS extension. */
19752 #define ARM_VARIANT & arm_ext_ras
19753 #undef THUMB_VARIANT
19754 #define THUMB_VARIANT & arm_ext_ras
19755 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
19758 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19759 #undef THUMB_VARIANT
19760 #define THUMB_VARIANT NULL
19762 cCE("wfs", e200110
, 1, (RR
), rd
),
19763 cCE("rfs", e300110
, 1, (RR
), rd
),
19764 cCE("wfc", e400110
, 1, (RR
), rd
),
19765 cCE("rfc", e500110
, 1, (RR
), rd
),
19767 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19768 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19769 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19770 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19772 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19773 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19774 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19775 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19777 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
19778 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
19779 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
19780 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
19781 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
19782 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
19783 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
19784 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
19785 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
19786 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
19787 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
19788 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
19790 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
19791 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
19792 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
19793 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
19794 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
19795 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
19796 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
19797 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
19798 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
19799 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
19800 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
19801 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
19803 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
19804 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
19805 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
19806 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
19807 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
19808 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
19809 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
19810 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
19811 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
19812 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
19813 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
19814 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
19816 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
19817 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
19818 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
19819 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
19820 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
19821 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
19822 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
19823 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
19824 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
19825 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
19826 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
19827 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
19829 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
19830 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
19831 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
19832 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
19833 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
19834 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
19835 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
19836 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
19837 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
19838 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
19839 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
19840 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
19842 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
19843 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
19844 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
19845 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
19846 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
19847 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
19848 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
19849 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
19850 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
19851 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
19852 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
19853 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
19855 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
19856 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
19857 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
19858 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
19859 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
19860 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
19861 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
19862 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
19863 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
19864 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
19865 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
19866 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
19868 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
19869 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
19870 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
19871 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
19872 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
19873 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
19874 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
19875 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
19876 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
19877 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
19878 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
19879 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
19881 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
19882 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
19883 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
19884 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
19885 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
19886 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
19887 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
19888 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
19889 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
19890 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
19891 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
19892 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
19894 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
19895 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
19896 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
19897 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
19898 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
19899 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
19900 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
19901 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
19902 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
19903 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
19904 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
19905 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
19907 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
19908 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
19909 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
19910 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
19911 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
19912 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
19913 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
19914 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
19915 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
19916 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
19917 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
19918 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
19920 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
19921 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
19922 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
19923 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
19924 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
19925 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
19926 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
19927 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
19928 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
19929 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
19930 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
19931 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
19933 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
19934 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
19935 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
19936 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
19937 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
19938 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
19939 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
19940 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
19941 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
19942 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
19943 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
19944 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
19946 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
19947 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
19948 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
19949 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
19950 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
19951 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
19952 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
19953 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
19954 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
19955 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
19956 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
19957 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
19959 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
19960 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
19961 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
19962 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
19963 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
19964 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
19965 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
19966 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
19967 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
19968 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
19969 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
19970 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
19972 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
19973 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
19974 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
19975 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
19976 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
19977 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
19978 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
19979 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
19980 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
19981 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
19982 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
19983 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
19985 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19986 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19987 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19988 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19989 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19990 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19991 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19992 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19993 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19994 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19995 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19996 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19998 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19999 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20000 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20001 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20002 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20003 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20004 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20005 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20006 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20007 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20008 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20009 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20011 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20012 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20013 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20014 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20015 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20016 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20017 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20018 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20019 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20020 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20021 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20022 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20024 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20025 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20026 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20027 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20028 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20029 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20030 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20031 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20032 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20033 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20034 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20035 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20037 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20038 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20039 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20040 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20041 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20042 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20043 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20044 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20045 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20046 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20047 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20048 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20050 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20051 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20052 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20053 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20054 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20055 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20056 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20057 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20058 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20059 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20060 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20061 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20063 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20064 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20065 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20066 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20067 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20068 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20069 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20070 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20071 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20072 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20073 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20074 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20076 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20077 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20078 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20079 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20080 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20081 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20082 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20083 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20084 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20085 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20086 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20087 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20089 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20090 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20091 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20092 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20093 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20094 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20095 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20096 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20097 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20098 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20099 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20100 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20102 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20103 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20104 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20105 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20106 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20107 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20108 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20109 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20110 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20111 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20112 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20113 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20115 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20116 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20117 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20118 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20119 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20120 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20121 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20122 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20123 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20124 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20125 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20126 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20128 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20129 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20130 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20131 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20132 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20133 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20134 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20135 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20136 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20137 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20138 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20139 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20141 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20142 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20143 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20144 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20145 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20146 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20147 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20148 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20149 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20150 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20151 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20152 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20154 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20155 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20156 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20157 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20159 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
20160 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
20161 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
20162 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
20163 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
20164 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
20165 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
20166 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
20167 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
20168 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
20169 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
20170 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
20172 /* The implementation of the FIX instruction is broken on some
20173 assemblers, in that it accepts a precision specifier as well as a
20174 rounding specifier, despite the fact that this is meaningless.
20175 To be more compatible, we accept it as well, though of course it
20176 does not set any bits. */
20177 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
20178 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
20179 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
20180 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
20181 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
20182 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
20183 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
20184 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
20185 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
20186 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
20187 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
20188 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
20189 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
20191 /* Instructions that were new with the real FPA, call them V2. */
20193 #define ARM_VARIANT & fpu_fpa_ext_v2
20195 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20196 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20197 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20198 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20199 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20200 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20203 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20205 /* Moves and type conversions. */
20206 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20207 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
20208 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
20209 cCE("fmstat", ef1fa10
, 0, (), noargs
),
20210 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
20211 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
20212 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20213 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20214 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20215 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20216 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20217 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20218 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
20219 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
20221 /* Memory operations. */
20222 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20223 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20224 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20225 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20226 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20227 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20228 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20229 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20230 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20231 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20232 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20233 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20234 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20235 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20236 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20237 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20238 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20239 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20241 /* Monadic operations. */
20242 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20243 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20244 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20246 /* Dyadic operations. */
20247 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20248 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20249 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20250 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20251 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20252 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20253 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20254 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20255 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20258 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20259 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
20260 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20261 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
20263 /* Double precision load/store are still present on single precision
20264 implementations. */
20265 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20266 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20267 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20268 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20269 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20270 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20271 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20272 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20273 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20274 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20277 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20279 /* Moves and type conversions. */
20280 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20281 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20282 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20283 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20284 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20285 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20286 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20287 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20288 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20289 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20290 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20291 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20292 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20294 /* Monadic operations. */
20295 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20296 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20297 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20299 /* Dyadic operations. */
20300 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20301 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20302 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20303 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20304 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20305 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20306 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20307 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20308 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20311 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20312 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
20313 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20314 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
20317 #define ARM_VARIANT & fpu_vfp_ext_v2
20319 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
20320 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
20321 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
20322 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
20324 /* Instructions which may belong to either the Neon or VFP instruction sets.
20325 Individual encoder functions perform additional architecture checks. */
20327 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20328 #undef THUMB_VARIANT
20329 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20331 /* These mnemonics are unique to VFP. */
20332 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
20333 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
20334 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20335 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20336 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20337 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20338 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20339 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
20340 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
20341 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
20343 /* Mnemonics shared by Neon and VFP. */
20344 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
20345 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20346 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20348 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20349 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20351 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20352 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20354 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20355 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20356 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20357 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20358 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20359 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20360 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20361 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20363 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
20364 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
20365 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
20366 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
20369 /* NOTE: All VMOV encoding is special-cased! */
20370 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
20371 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
20374 #define ARM_VARIANT & arm_ext_fp16
20375 #undef THUMB_VARIANT
20376 #define THUMB_VARIANT & arm_ext_fp16
20377 /* New instructions added from v8.2, allowing the extraction and insertion of
20378 the upper 16 bits of a 32-bit vector register. */
20379 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
20380 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
20382 #undef THUMB_VARIANT
20383 #define THUMB_VARIANT & fpu_neon_ext_v1
20385 #define ARM_VARIANT & fpu_neon_ext_v1
20387 /* Data processing with three registers of the same length. */
20388 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20389 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
20390 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
20391 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20392 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20393 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20394 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20395 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20396 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20397 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20398 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20399 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20400 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20401 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20402 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20403 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20404 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20405 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20406 /* If not immediate, fall back to neon_dyadic_i64_su.
20407 shl_imm should accept I8 I16 I32 I64,
20408 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20409 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
20410 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
20411 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
20412 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
20413 /* Logic ops, types optional & ignored. */
20414 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20415 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20416 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20417 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20418 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20419 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20420 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20421 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20422 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
20423 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
20424 /* Bitfield ops, untyped. */
20425 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20426 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20427 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20428 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20429 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20430 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20431 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20432 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20433 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20434 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20435 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20436 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20437 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20438 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20439 back to neon_dyadic_if_su. */
20440 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20441 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20442 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20443 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20444 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20445 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20446 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20447 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20448 /* Comparison. Type I8 I16 I32 F32. */
20449 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
20450 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
20451 /* As above, D registers only. */
20452 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20453 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20454 /* Int and float variants, signedness unimportant. */
20455 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20456 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20457 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
20458 /* Add/sub take types I8 I16 I32 I64 F32. */
20459 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20460 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20461 /* vtst takes sizes 8, 16, 32. */
20462 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
20463 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
20464 /* VMUL takes I8 I16 I32 F32 P8. */
20465 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
20466 /* VQD{R}MULH takes S16 S32. */
20467 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20468 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20469 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20470 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20471 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20472 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20473 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20474 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20475 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20476 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20477 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20478 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20479 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20480 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20481 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20482 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20483 /* ARM v8.1 extension. */
20484 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
20485 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
20486 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
20487 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
20489 /* Two address, int/float. Types S8 S16 S32 F32. */
20490 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20491 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20493 /* Data processing with two registers and a shift amount. */
20494 /* Right shifts, and variants with rounding.
20495 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20496 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20497 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20498 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20499 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20500 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20501 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20502 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20503 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20504 /* Shift and insert. Sizes accepted 8 16 32 64. */
20505 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
20506 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
20507 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
20508 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
20509 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20510 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
20511 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
20512 /* Right shift immediate, saturating & narrowing, with rounding variants.
20513 Types accepted S16 S32 S64 U16 U32 U64. */
20514 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20515 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20516 /* As above, unsigned. Types accepted S16 S32 S64. */
20517 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20518 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20519 /* Right shift narrowing. Types accepted I16 I32 I64. */
20520 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20521 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20522 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20523 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
20524 /* CVT with optional immediate for fixed-point variant. */
20525 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
20527 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
20528 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
20530 /* Data processing, three registers of different lengths. */
20531 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20532 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
20533 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20534 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20535 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20536 /* If not scalar, fall back to neon_dyadic_long.
20537 Vector types as above, scalar types S16 S32 U16 U32. */
20538 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20539 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20540 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20541 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20542 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20543 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20544 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20545 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20546 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20547 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20548 /* Saturating doubling multiplies. Types S16 S32. */
20549 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20550 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20551 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20552 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20553 S16 S32 U16 U32. */
20554 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
20556 /* Extract. Size 8. */
20557 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
20558 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
20560 /* Two registers, miscellaneous. */
20561 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20562 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
20563 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
20564 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
20565 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
20566 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
20567 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
20568 /* Vector replicate. Sizes 8 16 32. */
20569 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
20570 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
20571 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20572 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
20573 /* VMOVN. Types I16 I32 I64. */
20574 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
20575 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20576 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
20577 /* VQMOVUN. Types S16 S32 S64. */
20578 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
20579 /* VZIP / VUZP. Sizes 8 16 32. */
20580 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20581 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20582 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20583 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20584 /* VQABS / VQNEG. Types S8 S16 S32. */
20585 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20586 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20587 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20588 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20589 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20590 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20591 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
20592 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20593 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
20594 /* Reciprocal estimates. Types U32 F16 F32. */
20595 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20596 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
20597 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20598 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
20599 /* VCLS. Types S8 S16 S32. */
20600 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
20601 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
20602 /* VCLZ. Types I8 I16 I32. */
20603 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
20604 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
20605 /* VCNT. Size 8. */
20606 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
20607 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
20608 /* Two address, untyped. */
20609 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
20610 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
20611 /* VTRN. Sizes 8 16 32. */
20612 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
20613 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
20615 /* Table lookup. Size 8. */
20616 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20617 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20619 #undef THUMB_VARIANT
20620 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20622 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20624 /* Neon element/structure load/store. */
20625 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20626 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20627 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20628 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20629 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20630 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20631 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20632 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20634 #undef THUMB_VARIANT
20635 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20637 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20638 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
20639 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20640 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20641 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20642 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20643 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20644 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20645 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20646 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20648 #undef THUMB_VARIANT
20649 #define THUMB_VARIANT & fpu_vfp_ext_v3
20651 #define ARM_VARIANT & fpu_vfp_ext_v3
20653 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
20654 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20655 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20656 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20657 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20658 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20659 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20660 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20661 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20664 #define ARM_VARIANT & fpu_vfp_ext_fma
20665 #undef THUMB_VARIANT
20666 #define THUMB_VARIANT & fpu_vfp_ext_fma
20667 /* Mnemonics shared by Neon and VFP. These are included in the
20668 VFP FMA variant; NEON and VFP FMA always includes the NEON
20669 FMA instructions. */
20670 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20671 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20672 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20673 the v form should always be used. */
20674 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20675 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20676 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20677 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20678 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20679 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20681 #undef THUMB_VARIANT
20683 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20685 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20686 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20687 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20688 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20689 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20690 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20691 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
20692 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
20695 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20697 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
20698 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
20699 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
20700 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
20701 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
20702 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
20703 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
20704 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
20705 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
20706 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20707 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20708 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20709 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20710 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20711 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20712 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20713 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20714 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20715 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
20716 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
20717 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20718 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20719 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20720 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20721 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20722 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20723 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
20724 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
20725 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
20726 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
20727 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
20728 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
20729 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
20730 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
20731 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20732 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20733 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20734 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20735 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20736 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20737 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20738 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20739 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20740 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20741 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20742 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20743 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
20744 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20745 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20746 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20747 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20748 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20749 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20750 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20751 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20752 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20753 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20754 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20755 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20756 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20757 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20758 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20759 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20760 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20761 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20762 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20763 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20764 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20765 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20766 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20767 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20768 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20769 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20770 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20771 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20772 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20773 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20774 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20775 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20776 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20777 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20778 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20779 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20780 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20781 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20782 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20783 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20784 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20785 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
20786 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20787 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20788 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20789 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20790 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20791 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20792 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20793 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20794 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20795 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20796 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20797 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20798 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20799 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20800 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20801 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20802 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20803 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20804 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20805 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20806 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20807 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
20808 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20809 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20810 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20811 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20812 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20813 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20814 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20815 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20816 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20817 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20818 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20819 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20820 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20821 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20822 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20823 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20824 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20825 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20826 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20827 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20828 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20829 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20830 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20831 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20832 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20833 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20834 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20835 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20836 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20837 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20838 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20839 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20840 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20841 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20842 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20843 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20844 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20845 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20846 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20847 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20848 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20849 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20850 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20851 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20852 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20853 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20854 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20855 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20856 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20857 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20858 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
20861 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20863 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
20864 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
20865 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
20866 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20867 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20868 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20869 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20870 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20871 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20872 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20873 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20874 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20875 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20876 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20877 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20878 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20879 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20880 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20881 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20882 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20883 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
20884 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20885 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20886 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20887 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20888 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20889 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20890 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20891 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20892 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20893 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20894 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20895 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20896 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20897 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20898 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20899 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20900 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20901 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20902 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20903 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20904 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20905 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20906 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20907 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20908 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20909 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20910 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20911 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20912 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20913 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20914 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20915 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20916 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20917 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20918 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20919 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20922 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20924 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20925 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20926 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20927 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20928 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20929 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20930 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20931 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20932 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
20933 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
20934 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
20935 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
20936 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
20937 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
20938 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
20939 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
20940 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
20941 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
20942 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
20943 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
20944 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
20945 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
20946 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
20947 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
20948 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
20949 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
20950 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
20951 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
20952 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
20953 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
20954 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
20955 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
20956 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
20957 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
20958 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
20959 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
20960 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
20961 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
20962 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
20963 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
20964 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
20965 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
20966 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
20967 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
20968 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
20969 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
20970 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
20971 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
20972 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
20973 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
20974 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
20975 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
20976 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
20977 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
20978 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20979 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20980 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20981 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20982 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20983 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20984 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
20985 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
20986 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
20987 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
20988 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20989 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20990 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20991 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20992 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20993 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20994 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20995 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20996 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20997 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20998 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20999 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
21001 /* ARMv8-M instructions. */
21003 #define ARM_VARIANT NULL
21004 #undef THUMB_VARIANT
21005 #define THUMB_VARIANT & arm_ext_v8m
21006 TUE("sg", 0, e97fe97f
, 0, (), 0, noargs
),
21007 TUE("blxns", 0, 4784, 1, (RRnpc
), 0, t_blx
),
21008 TUE("bxns", 0, 4704, 1, (RRnpc
), 0, t_bx
),
21009 TUE("tt", 0, e840f000
, 2, (RRnpc
, RRnpc
), 0, tt
),
21010 TUE("ttt", 0, e840f040
, 2, (RRnpc
, RRnpc
), 0, tt
),
21011 TUE("tta", 0, e840f080
, 2, (RRnpc
, RRnpc
), 0, tt
),
21012 TUE("ttat", 0, e840f0c0
, 2, (RRnpc
, RRnpc
), 0, tt
),
21014 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21015 instructions behave as nop if no VFP is present. */
21016 #undef THUMB_VARIANT
21017 #define THUMB_VARIANT & arm_ext_v8m_main
21018 TUEc("vlldm", 0, ec300a00
, 1, (RRnpc
), rn
),
21019 TUEc("vlstm", 0, ec200a00
, 1, (RRnpc
), rn
),
21022 #undef THUMB_VARIANT
21048 /* MD interface: bits in the object file. */
21050 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21051 for use in the a.out file, and stores them in the array pointed to by buf.
21052 This knows about the endian-ness of the target machine and does
21053 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21054 2 (short) and 4 (long) Floating numbers are put out as a series of
21055 LITTLENUMS (shorts, here at least). */
21058 md_number_to_chars (char * buf
, valueT val
, int n
)
21060 if (target_big_endian
)
21061 number_to_chars_bigendian (buf
, val
, n
);
21063 number_to_chars_littleendian (buf
, val
, n
);
21067 md_chars_to_number (char * buf
, int n
)
21070 unsigned char * where
= (unsigned char *) buf
;
21072 if (target_big_endian
)
21077 result
|= (*where
++ & 255);
21085 result
|= (where
[n
] & 255);
21092 /* MD interface: Sections. */
21094 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21095 that an rs_machine_dependent frag may reach. */
21098 arm_frag_max_var (fragS
*fragp
)
21100 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21101 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21103 Note that we generate relaxable instructions even for cases that don't
21104 really need it, like an immediate that's a trivial constant. So we're
21105 overestimating the instruction size for some of those cases. Rather
21106 than putting more intelligence here, it would probably be better to
21107 avoid generating a relaxation frag in the first place when it can be
21108 determined up front that a short instruction will suffice. */
21110 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
21114 /* Estimate the size of a frag before relaxing. Assume everything fits in
21118 md_estimate_size_before_relax (fragS
* fragp
,
21119 segT segtype ATTRIBUTE_UNUSED
)
21125 /* Convert a machine dependent frag. */
21128 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
21130 unsigned long insn
;
21131 unsigned long old_op
;
21139 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21141 old_op
= bfd_get_16(abfd
, buf
);
21142 if (fragp
->fr_symbol
)
21144 exp
.X_op
= O_symbol
;
21145 exp
.X_add_symbol
= fragp
->fr_symbol
;
21149 exp
.X_op
= O_constant
;
21151 exp
.X_add_number
= fragp
->fr_offset
;
21152 opcode
= fragp
->fr_subtype
;
21155 case T_MNEM_ldr_pc
:
21156 case T_MNEM_ldr_pc2
:
21157 case T_MNEM_ldr_sp
:
21158 case T_MNEM_str_sp
:
21165 if (fragp
->fr_var
== 4)
21167 insn
= THUMB_OP32 (opcode
);
21168 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
21170 insn
|= (old_op
& 0x700) << 4;
21174 insn
|= (old_op
& 7) << 12;
21175 insn
|= (old_op
& 0x38) << 13;
21177 insn
|= 0x00000c00;
21178 put_thumb32_insn (buf
, insn
);
21179 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
21183 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
21185 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
21188 if (fragp
->fr_var
== 4)
21190 insn
= THUMB_OP32 (opcode
);
21191 insn
|= (old_op
& 0xf0) << 4;
21192 put_thumb32_insn (buf
, insn
);
21193 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
21197 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21198 exp
.X_add_number
-= 4;
21206 if (fragp
->fr_var
== 4)
21208 int r0off
= (opcode
== T_MNEM_mov
21209 || opcode
== T_MNEM_movs
) ? 0 : 8;
21210 insn
= THUMB_OP32 (opcode
);
21211 insn
= (insn
& 0xe1ffffff) | 0x10000000;
21212 insn
|= (old_op
& 0x700) << r0off
;
21213 put_thumb32_insn (buf
, insn
);
21214 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21218 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
21223 if (fragp
->fr_var
== 4)
21225 insn
= THUMB_OP32(opcode
);
21226 put_thumb32_insn (buf
, insn
);
21227 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
21230 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
21234 if (fragp
->fr_var
== 4)
21236 insn
= THUMB_OP32(opcode
);
21237 insn
|= (old_op
& 0xf00) << 14;
21238 put_thumb32_insn (buf
, insn
);
21239 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
21242 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
21245 case T_MNEM_add_sp
:
21246 case T_MNEM_add_pc
:
21247 case T_MNEM_inc_sp
:
21248 case T_MNEM_dec_sp
:
21249 if (fragp
->fr_var
== 4)
21251 /* ??? Choose between add and addw. */
21252 insn
= THUMB_OP32 (opcode
);
21253 insn
|= (old_op
& 0xf0) << 4;
21254 put_thumb32_insn (buf
, insn
);
21255 if (opcode
== T_MNEM_add_pc
)
21256 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
21258 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21261 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21269 if (fragp
->fr_var
== 4)
21271 insn
= THUMB_OP32 (opcode
);
21272 insn
|= (old_op
& 0xf0) << 4;
21273 insn
|= (old_op
& 0xf) << 16;
21274 put_thumb32_insn (buf
, insn
);
21275 if (insn
& (1 << 20))
21276 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21278 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21281 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21287 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
21288 (enum bfd_reloc_code_real
) reloc_type
);
21289 fixp
->fx_file
= fragp
->fr_file
;
21290 fixp
->fx_line
= fragp
->fr_line
;
21291 fragp
->fr_fix
+= fragp
->fr_var
;
21293 /* Set whether we use thumb-2 ISA based on final relaxation results. */
21294 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
21295 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
21296 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
21299 /* Return the size of a relaxable immediate operand instruction.
21300 SHIFT and SIZE specify the form of the allowable immediate. */
21302 relax_immediate (fragS
*fragp
, int size
, int shift
)
21308 /* ??? Should be able to do better than this. */
21309 if (fragp
->fr_symbol
)
21312 low
= (1 << shift
) - 1;
21313 mask
= (1 << (shift
+ size
)) - (1 << shift
);
21314 offset
= fragp
->fr_offset
;
21315 /* Force misaligned offsets to 32-bit variant. */
21318 if (offset
& ~mask
)
21323 /* Get the address of a symbol during relaxation. */
21325 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
21331 sym
= fragp
->fr_symbol
;
21332 sym_frag
= symbol_get_frag (sym
);
21333 know (S_GET_SEGMENT (sym
) != absolute_section
21334 || sym_frag
== &zero_address_frag
);
21335 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
21337 /* If frag has yet to be reached on this pass, assume it will
21338 move by STRETCH just as we did. If this is not so, it will
21339 be because some frag between grows, and that will force
21343 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
21347 /* Adjust stretch for any alignment frag. Note that if have
21348 been expanding the earlier code, the symbol may be
21349 defined in what appears to be an earlier frag. FIXME:
21350 This doesn't handle the fr_subtype field, which specifies
21351 a maximum number of bytes to skip when doing an
21353 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
21355 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
21358 stretch
= - ((- stretch
)
21359 & ~ ((1 << (int) f
->fr_offset
) - 1));
21361 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
21373 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21376 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
21381 /* Assume worst case for symbols not known to be in the same section. */
21382 if (fragp
->fr_symbol
== NULL
21383 || !S_IS_DEFINED (fragp
->fr_symbol
)
21384 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21385 || S_IS_WEAK (fragp
->fr_symbol
))
21388 val
= relaxed_symbol_addr (fragp
, stretch
);
21389 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
21390 addr
= (addr
+ 4) & ~3;
21391 /* Force misaligned targets to 32-bit variant. */
21395 if (val
< 0 || val
> 1020)
21400 /* Return the size of a relaxable add/sub immediate instruction. */
21402 relax_addsub (fragS
*fragp
, asection
*sec
)
21407 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21408 op
= bfd_get_16(sec
->owner
, buf
);
21409 if ((op
& 0xf) == ((op
>> 4) & 0xf))
21410 return relax_immediate (fragp
, 8, 0);
21412 return relax_immediate (fragp
, 3, 0);
21415 /* Return TRUE iff the definition of symbol S could be pre-empted
21416 (overridden) at link or load time. */
21418 symbol_preemptible (symbolS
*s
)
21420 /* Weak symbols can always be pre-empted. */
21424 /* Non-global symbols cannot be pre-empted. */
21425 if (! S_IS_EXTERNAL (s
))
21429 /* In ELF, a global symbol can be marked protected, or private. In that
21430 case it can't be pre-empted (other definitions in the same link unit
21431 would violate the ODR). */
21432 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
21436 /* Other global symbols might be pre-empted. */
21440 /* Return the size of a relaxable branch instruction. BITS is the
21441 size of the offset field in the narrow instruction. */
21444 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
21450 /* Assume worst case for symbols not known to be in the same section. */
21451 if (!S_IS_DEFINED (fragp
->fr_symbol
)
21452 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21453 || S_IS_WEAK (fragp
->fr_symbol
))
21457 /* A branch to a function in ARM state will require interworking. */
21458 if (S_IS_DEFINED (fragp
->fr_symbol
)
21459 && ARM_IS_FUNC (fragp
->fr_symbol
))
21463 if (symbol_preemptible (fragp
->fr_symbol
))
21466 val
= relaxed_symbol_addr (fragp
, stretch
);
21467 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
21470 /* Offset is a signed value *2 */
21472 if (val
>= limit
|| val
< -limit
)
21478 /* Relax a machine dependent frag. This returns the amount by which
21479 the current size of the frag should change. */
21482 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
21487 oldsize
= fragp
->fr_var
;
21488 switch (fragp
->fr_subtype
)
21490 case T_MNEM_ldr_pc2
:
21491 newsize
= relax_adr (fragp
, sec
, stretch
);
21493 case T_MNEM_ldr_pc
:
21494 case T_MNEM_ldr_sp
:
21495 case T_MNEM_str_sp
:
21496 newsize
= relax_immediate (fragp
, 8, 2);
21500 newsize
= relax_immediate (fragp
, 5, 2);
21504 newsize
= relax_immediate (fragp
, 5, 1);
21508 newsize
= relax_immediate (fragp
, 5, 0);
21511 newsize
= relax_adr (fragp
, sec
, stretch
);
21517 newsize
= relax_immediate (fragp
, 8, 0);
21520 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
21523 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
21525 case T_MNEM_add_sp
:
21526 case T_MNEM_add_pc
:
21527 newsize
= relax_immediate (fragp
, 8, 2);
21529 case T_MNEM_inc_sp
:
21530 case T_MNEM_dec_sp
:
21531 newsize
= relax_immediate (fragp
, 7, 2);
21537 newsize
= relax_addsub (fragp
, sec
);
21543 fragp
->fr_var
= newsize
;
21544 /* Freeze wide instructions that are at or before the same location as
21545 in the previous pass. This avoids infinite loops.
21546 Don't freeze them unconditionally because targets may be artificially
21547 misaligned by the expansion of preceding frags. */
21548 if (stretch
<= 0 && newsize
> 2)
21550 md_convert_frag (sec
->owner
, sec
, fragp
);
21554 return newsize
- oldsize
;
21557 /* Round up a section size to the appropriate boundary. */
21560 md_section_align (segT segment ATTRIBUTE_UNUSED
,
21563 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21564 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
21566 /* For a.out, force the section size to be aligned. If we don't do
21567 this, BFD will align it for us, but it will not write out the
21568 final bytes of the section. This may be a bug in BFD, but it is
21569 easier to fix it here since that is how the other a.out targets
21573 align
= bfd_get_section_alignment (stdoutput
, segment
);
21574 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
21581 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21582 of an rs_align_code fragment. */
21585 arm_handle_align (fragS
* fragP
)
21587 static unsigned char const arm_noop
[2][2][4] =
21590 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
21591 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
21594 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
21595 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
21598 static unsigned char const thumb_noop
[2][2][2] =
21601 {0xc0, 0x46}, /* LE */
21602 {0x46, 0xc0}, /* BE */
21605 {0x00, 0xbf}, /* LE */
21606 {0xbf, 0x00} /* BE */
21609 static unsigned char const wide_thumb_noop
[2][4] =
21610 { /* Wide Thumb-2 */
21611 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
21612 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
21615 unsigned bytes
, fix
, noop_size
;
21617 const unsigned char * noop
;
21618 const unsigned char *narrow_noop
= NULL
;
21623 if (fragP
->fr_type
!= rs_align_code
)
21626 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
21627 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
21630 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21631 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
21633 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
21635 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
21637 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21638 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
21640 narrow_noop
= thumb_noop
[1][target_big_endian
];
21641 noop
= wide_thumb_noop
[target_big_endian
];
21644 noop
= thumb_noop
[0][target_big_endian
];
21652 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21653 ? selected_cpu
: arm_arch_none
,
21655 [target_big_endian
];
21662 fragP
->fr_var
= noop_size
;
21664 if (bytes
& (noop_size
- 1))
21666 fix
= bytes
& (noop_size
- 1);
21668 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
21670 memset (p
, 0, fix
);
21677 if (bytes
& noop_size
)
21679 /* Insert a narrow noop. */
21680 memcpy (p
, narrow_noop
, noop_size
);
21682 bytes
-= noop_size
;
21686 /* Use wide noops for the remainder */
21690 while (bytes
>= noop_size
)
21692 memcpy (p
, noop
, noop_size
);
21694 bytes
-= noop_size
;
21698 fragP
->fr_fix
+= fix
;
21701 /* Called from md_do_align. Used to create an alignment
21702 frag in a code section. */
21705 arm_frag_align_code (int n
, int max
)
21709 /* We assume that there will never be a requirement
21710 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21711 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21716 _("alignments greater than %d bytes not supported in .text sections."),
21717 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
21718 as_fatal ("%s", err_msg
);
21721 p
= frag_var (rs_align_code
,
21722 MAX_MEM_FOR_RS_ALIGN_CODE
,
21724 (relax_substateT
) max
,
21731 /* Perform target specific initialisation of a frag.
21732 Note - despite the name this initialisation is not done when the frag
21733 is created, but only when its type is assigned. A frag can be created
21734 and used a long time before its type is set, so beware of assuming that
21735 this initialisationis performed first. */
21739 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
21741 /* Record whether this frag is in an ARM or a THUMB area. */
21742 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21745 #else /* OBJ_ELF is defined. */
21747 arm_init_frag (fragS
* fragP
, int max_chars
)
21749 int frag_thumb_mode
;
21751 /* If the current ARM vs THUMB mode has not already
21752 been recorded into this frag then do so now. */
21753 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
21754 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21756 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
21758 /* Record a mapping symbol for alignment frags. We will delete this
21759 later if the alignment ends up empty. */
21760 switch (fragP
->fr_type
)
21763 case rs_align_test
:
21765 mapping_state_2 (MAP_DATA
, max_chars
);
21767 case rs_align_code
:
21768 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
21775 /* When we change sections we need to issue a new mapping symbol. */
21778 arm_elf_change_section (void)
21780 /* Link an unlinked unwind index table section to the .text section. */
21781 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
21782 && elf_linked_to_section (now_seg
) == NULL
)
21783 elf_linked_to_section (now_seg
) = text_section
;
21787 arm_elf_section_type (const char * str
, size_t len
)
21789 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
21790 return SHT_ARM_EXIDX
;
21795 /* Code to deal with unwinding tables. */
21797 static void add_unwind_adjustsp (offsetT
);
21799 /* Generate any deferred unwind frame offset. */
21802 flush_pending_unwind (void)
21806 offset
= unwind
.pending_offset
;
21807 unwind
.pending_offset
= 0;
21809 add_unwind_adjustsp (offset
);
21812 /* Add an opcode to this list for this function. Two-byte opcodes should
21813 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21817 add_unwind_opcode (valueT op
, int length
)
21819 /* Add any deferred stack adjustment. */
21820 if (unwind
.pending_offset
)
21821 flush_pending_unwind ();
21823 unwind
.sp_restored
= 0;
21825 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
21827 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
21828 if (unwind
.opcodes
)
21829 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
21830 unwind
.opcode_alloc
);
21832 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
21837 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
21839 unwind
.opcode_count
++;
21843 /* Add unwind opcodes to adjust the stack pointer. */
21846 add_unwind_adjustsp (offsetT offset
)
21850 if (offset
> 0x200)
21852 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
21857 /* Long form: 0xb2, uleb128. */
21858 /* This might not fit in a word so add the individual bytes,
21859 remembering the list is built in reverse order. */
21860 o
= (valueT
) ((offset
- 0x204) >> 2);
21862 add_unwind_opcode (0, 1);
21864 /* Calculate the uleb128 encoding of the offset. */
21868 bytes
[n
] = o
& 0x7f;
21874 /* Add the insn. */
21876 add_unwind_opcode (bytes
[n
- 1], 1);
21877 add_unwind_opcode (0xb2, 1);
21879 else if (offset
> 0x100)
21881 /* Two short opcodes. */
21882 add_unwind_opcode (0x3f, 1);
21883 op
= (offset
- 0x104) >> 2;
21884 add_unwind_opcode (op
, 1);
21886 else if (offset
> 0)
21888 /* Short opcode. */
21889 op
= (offset
- 4) >> 2;
21890 add_unwind_opcode (op
, 1);
21892 else if (offset
< 0)
21895 while (offset
> 0x100)
21897 add_unwind_opcode (0x7f, 1);
21900 op
= ((offset
- 4) >> 2) | 0x40;
21901 add_unwind_opcode (op
, 1);
21905 /* Finish the list of unwind opcodes for this function. */
21907 finish_unwind_opcodes (void)
21911 if (unwind
.fp_used
)
21913 /* Adjust sp as necessary. */
21914 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
21915 flush_pending_unwind ();
21917 /* After restoring sp from the frame pointer. */
21918 op
= 0x90 | unwind
.fp_reg
;
21919 add_unwind_opcode (op
, 1);
21922 flush_pending_unwind ();
21926 /* Start an exception table entry. If idx is nonzero this is an index table
21930 start_unwind_section (const segT text_seg
, int idx
)
21932 const char * text_name
;
21933 const char * prefix
;
21934 const char * prefix_once
;
21935 const char * group_name
;
21943 prefix
= ELF_STRING_ARM_unwind
;
21944 prefix_once
= ELF_STRING_ARM_unwind_once
;
21945 type
= SHT_ARM_EXIDX
;
21949 prefix
= ELF_STRING_ARM_unwind_info
;
21950 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
21951 type
= SHT_PROGBITS
;
21954 text_name
= segment_name (text_seg
);
21955 if (streq (text_name
, ".text"))
21958 if (strncmp (text_name
, ".gnu.linkonce.t.",
21959 strlen (".gnu.linkonce.t.")) == 0)
21961 prefix
= prefix_once
;
21962 text_name
+= strlen (".gnu.linkonce.t.");
21965 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
21971 /* Handle COMDAT group. */
21972 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
21974 group_name
= elf_group_name (text_seg
);
21975 if (group_name
== NULL
)
21977 as_bad (_("Group section `%s' has no group signature"),
21978 segment_name (text_seg
));
21979 ignore_rest_of_line ();
21982 flags
|= SHF_GROUP
;
21986 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
21988 /* Set the section link for index tables. */
21990 elf_linked_to_section (now_seg
) = text_seg
;
21994 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21995 personality routine data. Returns zero, or the index table value for
21996 an inline entry. */
21999 create_unwind_entry (int have_data
)
22004 /* The current word of data. */
22006 /* The number of bytes left in this word. */
22009 finish_unwind_opcodes ();
22011 /* Remember the current text section. */
22012 unwind
.saved_seg
= now_seg
;
22013 unwind
.saved_subseg
= now_subseg
;
22015 start_unwind_section (now_seg
, 0);
22017 if (unwind
.personality_routine
== NULL
)
22019 if (unwind
.personality_index
== -2)
22022 as_bad (_("handlerdata in cantunwind frame"));
22023 return 1; /* EXIDX_CANTUNWIND. */
22026 /* Use a default personality routine if none is specified. */
22027 if (unwind
.personality_index
== -1)
22029 if (unwind
.opcode_count
> 3)
22030 unwind
.personality_index
= 1;
22032 unwind
.personality_index
= 0;
22035 /* Space for the personality routine entry. */
22036 if (unwind
.personality_index
== 0)
22038 if (unwind
.opcode_count
> 3)
22039 as_bad (_("too many unwind opcodes for personality routine 0"));
22043 /* All the data is inline in the index table. */
22046 while (unwind
.opcode_count
> 0)
22048 unwind
.opcode_count
--;
22049 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
22053 /* Pad with "finish" opcodes. */
22055 data
= (data
<< 8) | 0xb0;
22062 /* We get two opcodes "free" in the first word. */
22063 size
= unwind
.opcode_count
- 2;
22067 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
22068 if (unwind
.personality_index
!= -1)
22070 as_bad (_("attempt to recreate an unwind entry"));
22074 /* An extra byte is required for the opcode count. */
22075 size
= unwind
.opcode_count
+ 1;
22078 size
= (size
+ 3) >> 2;
22080 as_bad (_("too many unwind opcodes"));
22082 frag_align (2, 0, 0);
22083 record_alignment (now_seg
, 2);
22084 unwind
.table_entry
= expr_build_dot ();
22086 /* Allocate the table entry. */
22087 ptr
= frag_more ((size
<< 2) + 4);
22088 /* PR 13449: Zero the table entries in case some of them are not used. */
22089 memset (ptr
, 0, (size
<< 2) + 4);
22090 where
= frag_now_fix () - ((size
<< 2) + 4);
22092 switch (unwind
.personality_index
)
22095 /* ??? Should this be a PLT generating relocation? */
22096 /* Custom personality routine. */
22097 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
22098 BFD_RELOC_ARM_PREL31
);
22103 /* Set the first byte to the number of additional words. */
22104 data
= size
> 0 ? size
- 1 : 0;
22108 /* ABI defined personality routines. */
22110 /* Three opcodes bytes are packed into the first word. */
22117 /* The size and first two opcode bytes go in the first word. */
22118 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
22123 /* Should never happen. */
22127 /* Pack the opcodes into words (MSB first), reversing the list at the same
22129 while (unwind
.opcode_count
> 0)
22133 md_number_to_chars (ptr
, data
, 4);
22138 unwind
.opcode_count
--;
22140 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
22143 /* Finish off the last word. */
22146 /* Pad with "finish" opcodes. */
22148 data
= (data
<< 8) | 0xb0;
22150 md_number_to_chars (ptr
, data
, 4);
22155 /* Add an empty descriptor if there is no user-specified data. */
22156 ptr
= frag_more (4);
22157 md_number_to_chars (ptr
, 0, 4);
22164 /* Initialize the DWARF-2 unwind information for this procedure. */
22167 tc_arm_frame_initial_instructions (void)
22169 cfi_add_CFA_def_cfa (REG_SP
, 0);
22171 #endif /* OBJ_ELF */
22173 /* Convert REGNAME to a DWARF-2 register number. */
22176 tc_arm_regname_to_dw2regnum (char *regname
)
22178 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
22182 /* PR 16694: Allow VFP registers as well. */
22183 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
22187 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
22196 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
22200 exp
.X_op
= O_secrel
;
22201 exp
.X_add_symbol
= symbol
;
22202 exp
.X_add_number
= 0;
22203 emit_expr (&exp
, size
);
22207 /* MD interface: Symbol and relocation handling. */
22209 /* Return the address within the segment that a PC-relative fixup is
22210 relative to. For ARM, PC-relative fixups applied to instructions
22211 are generally relative to the location of the fixup plus 8 bytes.
22212 Thumb branches are offset by 4, and Thumb loads relative to PC
22213 require special handling. */
22216 md_pcrel_from_section (fixS
* fixP
, segT seg
)
22218 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22220 /* If this is pc-relative and we are going to emit a relocation
22221 then we just want to put out any pipeline compensation that the linker
22222 will need. Otherwise we want to use the calculated base.
22223 For WinCE we skip the bias for externals as well, since this
22224 is how the MS ARM-CE assembler behaves and we want to be compatible. */
22226 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22227 || (arm_force_relocation (fixP
)
22229 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
22235 switch (fixP
->fx_r_type
)
22237 /* PC relative addressing on the Thumb is slightly odd as the
22238 bottom two bits of the PC are forced to zero for the
22239 calculation. This happens *after* application of the
22240 pipeline offset. However, Thumb adrl already adjusts for
22241 this, so we need not do it again. */
22242 case BFD_RELOC_ARM_THUMB_ADD
:
22245 case BFD_RELOC_ARM_THUMB_OFFSET
:
22246 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22247 case BFD_RELOC_ARM_T32_ADD_PC12
:
22248 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
22249 return (base
+ 4) & ~3;
22251 /* Thumb branches are simply offset by +4. */
22252 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
22253 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
22254 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
22255 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
22256 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
22259 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
22261 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22262 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22263 && ARM_IS_FUNC (fixP
->fx_addsy
)
22264 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22265 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22268 /* BLX is like branches above, but forces the low two bits of PC to
22270 case BFD_RELOC_THUMB_PCREL_BLX
:
22272 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22273 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22274 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22275 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22276 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22277 return (base
+ 4) & ~3;
22279 /* ARM mode branches are offset by +8. However, the Windows CE
22280 loader expects the relocation not to take this into account. */
22281 case BFD_RELOC_ARM_PCREL_BLX
:
22283 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22284 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22285 && ARM_IS_FUNC (fixP
->fx_addsy
)
22286 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22287 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22290 case BFD_RELOC_ARM_PCREL_CALL
:
22292 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22293 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22294 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22295 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22296 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22299 case BFD_RELOC_ARM_PCREL_BRANCH
:
22300 case BFD_RELOC_ARM_PCREL_JUMP
:
22301 case BFD_RELOC_ARM_PLT32
:
22303 /* When handling fixups immediately, because we have already
22304 discovered the value of a symbol, or the address of the frag involved
22305 we must account for the offset by +8, as the OS loader will never see the reloc.
22306 see fixup_segment() in write.c
22307 The S_IS_EXTERNAL test handles the case of global symbols.
22308 Those need the calculated base, not just the pipe compensation the linker will need. */
22310 && fixP
->fx_addsy
!= NULL
22311 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22312 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
22320 /* ARM mode loads relative to PC are also offset by +8. Unlike
22321 branches, the Windows CE loader *does* expect the relocation
22322 to take this into account. */
22323 case BFD_RELOC_ARM_OFFSET_IMM
:
22324 case BFD_RELOC_ARM_OFFSET_IMM8
:
22325 case BFD_RELOC_ARM_HWLITERAL
:
22326 case BFD_RELOC_ARM_LITERAL
:
22327 case BFD_RELOC_ARM_CP_OFF_IMM
:
22331 /* Other PC-relative relocations are un-offset. */
22337 static bfd_boolean flag_warn_syms
= TRUE
;
22340 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
22342 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22343 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22344 does mean that the resulting code might be very confusing to the reader.
22345 Also this warning can be triggered if the user omits an operand before
22346 an immediate address, eg:
22350 GAS treats this as an assignment of the value of the symbol foo to a
22351 symbol LDR, and so (without this code) it will not issue any kind of
22352 warning or error message.
22354 Note - ARM instructions are case-insensitive but the strings in the hash
22355 table are all stored in lower case, so we must first ensure that name is
22357 if (flag_warn_syms
&& arm_ops_hsh
)
22359 char * nbuf
= strdup (name
);
22362 for (p
= nbuf
; *p
; p
++)
22364 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
22366 static struct hash_control
* already_warned
= NULL
;
22368 if (already_warned
== NULL
)
22369 already_warned
= hash_new ();
22370 /* Only warn about the symbol once. To keep the code
22371 simple we let hash_insert do the lookup for us. */
22372 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
22373 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
22382 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22383 Otherwise we have no need to default values of symbols. */
22386 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
22389 if (name
[0] == '_' && name
[1] == 'G'
22390 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
22394 if (symbol_find (name
))
22395 as_bad (_("GOT already in the symbol table"));
22397 GOT_symbol
= symbol_new (name
, undefined_section
,
22398 (valueT
) 0, & zero_address_frag
);
22408 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22409 computed as two separate immediate values, added together. We
22410 already know that this value cannot be computed by just one ARM
22413 static unsigned int
22414 validate_immediate_twopart (unsigned int val
,
22415 unsigned int * highpart
)
22420 for (i
= 0; i
< 32; i
+= 2)
22421 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
22427 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
22429 else if (a
& 0xff0000)
22431 if (a
& 0xff000000)
22433 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
22437 gas_assert (a
& 0xff000000);
22438 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
22441 return (a
& 0xff) | (i
<< 7);
22448 validate_offset_imm (unsigned int val
, int hwse
)
22450 if ((hwse
&& val
> 255) || val
> 4095)
22455 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22456 negative immediate constant by altering the instruction. A bit of
22461 by inverting the second operand, and
22464 by negating the second operand. */
22467 negate_data_op (unsigned long * instruction
,
22468 unsigned long value
)
22471 unsigned long negated
, inverted
;
22473 negated
= encode_arm_immediate (-value
);
22474 inverted
= encode_arm_immediate (~value
);
22476 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
22479 /* First negates. */
22480 case OPCODE_SUB
: /* ADD <-> SUB */
22481 new_inst
= OPCODE_ADD
;
22486 new_inst
= OPCODE_SUB
;
22490 case OPCODE_CMP
: /* CMP <-> CMN */
22491 new_inst
= OPCODE_CMN
;
22496 new_inst
= OPCODE_CMP
;
22500 /* Now Inverted ops. */
22501 case OPCODE_MOV
: /* MOV <-> MVN */
22502 new_inst
= OPCODE_MVN
;
22507 new_inst
= OPCODE_MOV
;
22511 case OPCODE_AND
: /* AND <-> BIC */
22512 new_inst
= OPCODE_BIC
;
22517 new_inst
= OPCODE_AND
;
22521 case OPCODE_ADC
: /* ADC <-> SBC */
22522 new_inst
= OPCODE_SBC
;
22527 new_inst
= OPCODE_ADC
;
22531 /* We cannot do anything. */
22536 if (value
== (unsigned) FAIL
)
22539 *instruction
&= OPCODE_MASK
;
22540 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
22544 /* Like negate_data_op, but for Thumb-2. */
22546 static unsigned int
22547 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
22551 unsigned int negated
, inverted
;
22553 negated
= encode_thumb32_immediate (-value
);
22554 inverted
= encode_thumb32_immediate (~value
);
22556 rd
= (*instruction
>> 8) & 0xf;
22557 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
22560 /* ADD <-> SUB. Includes CMP <-> CMN. */
22561 case T2_OPCODE_SUB
:
22562 new_inst
= T2_OPCODE_ADD
;
22566 case T2_OPCODE_ADD
:
22567 new_inst
= T2_OPCODE_SUB
;
22571 /* ORR <-> ORN. Includes MOV <-> MVN. */
22572 case T2_OPCODE_ORR
:
22573 new_inst
= T2_OPCODE_ORN
;
22577 case T2_OPCODE_ORN
:
22578 new_inst
= T2_OPCODE_ORR
;
22582 /* AND <-> BIC. TST has no inverted equivalent. */
22583 case T2_OPCODE_AND
:
22584 new_inst
= T2_OPCODE_BIC
;
22591 case T2_OPCODE_BIC
:
22592 new_inst
= T2_OPCODE_AND
;
22597 case T2_OPCODE_ADC
:
22598 new_inst
= T2_OPCODE_SBC
;
22602 case T2_OPCODE_SBC
:
22603 new_inst
= T2_OPCODE_ADC
;
22607 /* We cannot do anything. */
22612 if (value
== (unsigned int)FAIL
)
22615 *instruction
&= T2_OPCODE_MASK
;
22616 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
22620 /* Read a 32-bit thumb instruction from buf. */
22621 static unsigned long
22622 get_thumb32_insn (char * buf
)
22624 unsigned long insn
;
22625 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
22626 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22632 /* We usually want to set the low bit on the address of thumb function
22633 symbols. In particular .word foo - . should have the low bit set.
22634 Generic code tries to fold the difference of two symbols to
22635 a constant. Prevent this and force a relocation when the first symbols
22636 is a thumb function. */
22639 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
22641 if (op
== O_subtract
22642 && l
->X_op
== O_symbol
22643 && r
->X_op
== O_symbol
22644 && THUMB_IS_FUNC (l
->X_add_symbol
))
22646 l
->X_op
= O_subtract
;
22647 l
->X_op_symbol
= r
->X_add_symbol
;
22648 l
->X_add_number
-= r
->X_add_number
;
22652 /* Process as normal. */
22656 /* Encode Thumb2 unconditional branches and calls. The encoding
22657 for the 2 are identical for the immediate values. */
22660 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
22662 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22665 addressT S
, I1
, I2
, lo
, hi
;
22667 S
= (value
>> 24) & 0x01;
22668 I1
= (value
>> 23) & 0x01;
22669 I2
= (value
>> 22) & 0x01;
22670 hi
= (value
>> 12) & 0x3ff;
22671 lo
= (value
>> 1) & 0x7ff;
22672 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22673 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22674 newval
|= (S
<< 10) | hi
;
22675 newval2
&= ~T2I1I2MASK
;
22676 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
22677 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22678 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22682 md_apply_fix (fixS
* fixP
,
22686 offsetT value
= * valP
;
22688 unsigned int newimm
;
22689 unsigned long temp
;
22691 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
22693 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
22695 /* Note whether this will delete the relocation. */
22697 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
22700 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22701 consistency with the behaviour on 32-bit hosts. Remember value
22703 value
&= 0xffffffff;
22704 value
^= 0x80000000;
22705 value
-= 0x80000000;
22708 fixP
->fx_addnumber
= value
;
22710 /* Same treatment for fixP->fx_offset. */
22711 fixP
->fx_offset
&= 0xffffffff;
22712 fixP
->fx_offset
^= 0x80000000;
22713 fixP
->fx_offset
-= 0x80000000;
22715 switch (fixP
->fx_r_type
)
22717 case BFD_RELOC_NONE
:
22718 /* This will need to go in the object file. */
22722 case BFD_RELOC_ARM_IMMEDIATE
:
22723 /* We claim that this fixup has been processed here,
22724 even if in fact we generate an error because we do
22725 not have a reloc for it, so tc_gen_reloc will reject it. */
22728 if (fixP
->fx_addsy
)
22730 const char *msg
= 0;
22732 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22733 msg
= _("undefined symbol %s used as an immediate value");
22734 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22735 msg
= _("symbol %s is in a different section");
22736 else if (S_IS_WEAK (fixP
->fx_addsy
))
22737 msg
= _("symbol %s is weak and may be overridden later");
22741 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22742 msg
, S_GET_NAME (fixP
->fx_addsy
));
22747 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22749 /* If the offset is negative, we should use encoding A2 for ADR. */
22750 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
22751 newimm
= negate_data_op (&temp
, value
);
22754 newimm
= encode_arm_immediate (value
);
22756 /* If the instruction will fail, see if we can fix things up by
22757 changing the opcode. */
22758 if (newimm
== (unsigned int) FAIL
)
22759 newimm
= negate_data_op (&temp
, value
);
22762 if (newimm
== (unsigned int) FAIL
)
22764 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22765 _("invalid constant (%lx) after fixup"),
22766 (unsigned long) value
);
22770 newimm
|= (temp
& 0xfffff000);
22771 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22774 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
22776 unsigned int highpart
= 0;
22777 unsigned int newinsn
= 0xe1a00000; /* nop. */
22779 if (fixP
->fx_addsy
)
22781 const char *msg
= 0;
22783 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22784 msg
= _("undefined symbol %s used as an immediate value");
22785 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22786 msg
= _("symbol %s is in a different section");
22787 else if (S_IS_WEAK (fixP
->fx_addsy
))
22788 msg
= _("symbol %s is weak and may be overridden later");
22792 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22793 msg
, S_GET_NAME (fixP
->fx_addsy
));
22798 newimm
= encode_arm_immediate (value
);
22799 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22801 /* If the instruction will fail, see if we can fix things up by
22802 changing the opcode. */
22803 if (newimm
== (unsigned int) FAIL
22804 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
22806 /* No ? OK - try using two ADD instructions to generate
22808 newimm
= validate_immediate_twopart (value
, & highpart
);
22810 /* Yes - then make sure that the second instruction is
22812 if (newimm
!= (unsigned int) FAIL
)
22814 /* Still No ? Try using a negated value. */
22815 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
22816 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
22817 /* Otherwise - give up. */
22820 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22821 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22826 /* Replace the first operand in the 2nd instruction (which
22827 is the PC) with the destination register. We have
22828 already added in the PC in the first instruction and we
22829 do not want to do it again. */
22830 newinsn
&= ~ 0xf0000;
22831 newinsn
|= ((newinsn
& 0x0f000) << 4);
22834 newimm
|= (temp
& 0xfffff000);
22835 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22837 highpart
|= (newinsn
& 0xfffff000);
22838 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
22842 case BFD_RELOC_ARM_OFFSET_IMM
:
22843 if (!fixP
->fx_done
&& seg
->use_rela_p
)
22846 case BFD_RELOC_ARM_LITERAL
:
22852 if (validate_offset_imm (value
, 0) == FAIL
)
22854 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
22855 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22856 _("invalid literal constant: pool needs to be closer"));
22858 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22859 _("bad immediate value for offset (%ld)"),
22864 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22866 newval
&= 0xfffff000;
22869 newval
&= 0xff7ff000;
22870 newval
|= value
| (sign
? INDEX_UP
: 0);
22872 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22875 case BFD_RELOC_ARM_OFFSET_IMM8
:
22876 case BFD_RELOC_ARM_HWLITERAL
:
22882 if (validate_offset_imm (value
, 1) == FAIL
)
22884 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
22885 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22886 _("invalid literal constant: pool needs to be closer"));
22888 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22889 _("bad immediate value for 8-bit offset (%ld)"),
22894 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22896 newval
&= 0xfffff0f0;
22899 newval
&= 0xff7ff0f0;
22900 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
22902 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22905 case BFD_RELOC_ARM_T32_OFFSET_U8
:
22906 if (value
< 0 || value
> 1020 || value
% 4 != 0)
22907 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22908 _("bad immediate value for offset (%ld)"), (long) value
);
22911 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
22913 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
22916 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22917 /* This is a complicated relocation used for all varieties of Thumb32
22918 load/store instruction with immediate offset:
22920 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22921 *4, optional writeback(W)
22922 (doubleword load/store)
22924 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22925 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22926 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22927 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22928 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22930 Uppercase letters indicate bits that are already encoded at
22931 this point. Lowercase letters are our problem. For the
22932 second block of instructions, the secondary opcode nybble
22933 (bits 8..11) is present, and bit 23 is zero, even if this is
22934 a PC-relative operation. */
22935 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22937 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
22939 if ((newval
& 0xf0000000) == 0xe0000000)
22941 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22943 newval
|= (1 << 23);
22946 if (value
% 4 != 0)
22948 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22949 _("offset not a multiple of 4"));
22955 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22956 _("offset out of range"));
22961 else if ((newval
& 0x000f0000) == 0x000f0000)
22963 /* PC-relative, 12-bit offset. */
22965 newval
|= (1 << 23);
22970 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22971 _("offset out of range"));
22976 else if ((newval
& 0x00000100) == 0x00000100)
22978 /* Writeback: 8-bit, +/- offset. */
22980 newval
|= (1 << 9);
22985 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22986 _("offset out of range"));
22991 else if ((newval
& 0x00000f00) == 0x00000e00)
22993 /* T-instruction: positive 8-bit offset. */
22994 if (value
< 0 || value
> 0xff)
22996 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22997 _("offset out of range"));
23005 /* Positive 12-bit or negative 8-bit offset. */
23009 newval
|= (1 << 23);
23019 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23020 _("offset out of range"));
23027 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
23028 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
23031 case BFD_RELOC_ARM_SHIFT_IMM
:
23032 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23033 if (((unsigned long) value
) > 32
23035 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
23037 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23038 _("shift expression is too large"));
23043 /* Shifts of zero must be done as lsl. */
23045 else if (value
== 32)
23047 newval
&= 0xfffff07f;
23048 newval
|= (value
& 0x1f) << 7;
23049 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23052 case BFD_RELOC_ARM_T32_IMMEDIATE
:
23053 case BFD_RELOC_ARM_T32_ADD_IMM
:
23054 case BFD_RELOC_ARM_T32_IMM12
:
23055 case BFD_RELOC_ARM_T32_ADD_PC12
:
23056 /* We claim that this fixup has been processed here,
23057 even if in fact we generate an error because we do
23058 not have a reloc for it, so tc_gen_reloc will reject it. */
23062 && ! S_IS_DEFINED (fixP
->fx_addsy
))
23064 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23065 _("undefined symbol %s used as an immediate value"),
23066 S_GET_NAME (fixP
->fx_addsy
));
23070 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23072 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
23075 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
23076 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23078 newimm
= encode_thumb32_immediate (value
);
23079 if (newimm
== (unsigned int) FAIL
)
23080 newimm
= thumb32_negate_data_op (&newval
, value
);
23082 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
23083 && newimm
== (unsigned int) FAIL
)
23085 /* Turn add/sum into addw/subw. */
23086 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23087 newval
= (newval
& 0xfeffffff) | 0x02000000;
23088 /* No flat 12-bit imm encoding for addsw/subsw. */
23089 if ((newval
& 0x00100000) == 0)
23091 /* 12 bit immediate for addw/subw. */
23095 newval
^= 0x00a00000;
23098 newimm
= (unsigned int) FAIL
;
23104 if (newimm
== (unsigned int)FAIL
)
23106 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23107 _("invalid constant (%lx) after fixup"),
23108 (unsigned long) value
);
23112 newval
|= (newimm
& 0x800) << 15;
23113 newval
|= (newimm
& 0x700) << 4;
23114 newval
|= (newimm
& 0x0ff);
23116 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
23117 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
23120 case BFD_RELOC_ARM_SMC
:
23121 if (((unsigned long) value
) > 0xffff)
23122 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23123 _("invalid smc expression"));
23124 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23125 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
23126 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23129 case BFD_RELOC_ARM_HVC
:
23130 if (((unsigned long) value
) > 0xffff)
23131 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23132 _("invalid hvc expression"));
23133 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23134 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
23135 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23138 case BFD_RELOC_ARM_SWI
:
23139 if (fixP
->tc_fix_data
!= 0)
23141 if (((unsigned long) value
) > 0xff)
23142 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23143 _("invalid swi expression"));
23144 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23146 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23150 if (((unsigned long) value
) > 0x00ffffff)
23151 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23152 _("invalid swi expression"));
23153 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23155 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23159 case BFD_RELOC_ARM_MULTI
:
23160 if (((unsigned long) value
) > 0xffff)
23161 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23162 _("invalid expression in load/store multiple"));
23163 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
23164 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23168 case BFD_RELOC_ARM_PCREL_CALL
:
23170 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23172 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23173 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23174 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23175 /* Flip the bl to blx. This is a simple flip
23176 bit here because we generate PCREL_CALL for
23177 unconditional bls. */
23179 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23180 newval
= newval
| 0x10000000;
23181 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23187 goto arm_branch_common
;
23189 case BFD_RELOC_ARM_PCREL_JUMP
:
23190 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23192 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23193 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23194 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23196 /* This would map to a bl<cond>, b<cond>,
23197 b<always> to a Thumb function. We
23198 need to force a relocation for this particular
23200 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23204 case BFD_RELOC_ARM_PLT32
:
23206 case BFD_RELOC_ARM_PCREL_BRANCH
:
23208 goto arm_branch_common
;
23210 case BFD_RELOC_ARM_PCREL_BLX
:
23213 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23215 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23216 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23217 && ARM_IS_FUNC (fixP
->fx_addsy
))
23219 /* Flip the blx to a bl and warn. */
23220 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23221 newval
= 0xeb000000;
23222 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23223 _("blx to '%s' an ARM ISA state function changed to bl"),
23225 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23231 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23232 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
23236 /* We are going to store value (shifted right by two) in the
23237 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23238 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23239 also be be clear. */
23241 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23242 _("misaligned branch destination"));
23243 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
23244 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
23245 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23247 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23249 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23250 newval
|= (value
>> 2) & 0x00ffffff;
23251 /* Set the H bit on BLX instructions. */
23255 newval
|= 0x01000000;
23257 newval
&= ~0x01000000;
23259 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23263 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
23264 /* CBZ can only branch forward. */
23266 /* Attempts to use CBZ to branch to the next instruction
23267 (which, strictly speaking, are prohibited) will be turned into
23270 FIXME: It may be better to remove the instruction completely and
23271 perform relaxation. */
23274 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23275 newval
= 0xbf00; /* NOP encoding T1 */
23276 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23281 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23283 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23285 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23286 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
23287 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23292 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
23293 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
23294 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23296 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23298 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23299 newval
|= (value
& 0x1ff) >> 1;
23300 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23304 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
23305 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
23306 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23308 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23310 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23311 newval
|= (value
& 0xfff) >> 1;
23312 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23316 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23318 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23319 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23320 && ARM_IS_FUNC (fixP
->fx_addsy
)
23321 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23323 /* Force a relocation for a branch 20 bits wide. */
23326 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
23327 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23328 _("conditional branch out of range"));
23330 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23333 addressT S
, J1
, J2
, lo
, hi
;
23335 S
= (value
& 0x00100000) >> 20;
23336 J2
= (value
& 0x00080000) >> 19;
23337 J1
= (value
& 0x00040000) >> 18;
23338 hi
= (value
& 0x0003f000) >> 12;
23339 lo
= (value
& 0x00000ffe) >> 1;
23341 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23342 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23343 newval
|= (S
<< 10) | hi
;
23344 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
23345 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23346 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
23350 case BFD_RELOC_THUMB_PCREL_BLX
:
23351 /* If there is a blx from a thumb state function to
23352 another thumb function flip this to a bl and warn
23356 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23357 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23358 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23360 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23361 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23362 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23364 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23365 newval
= newval
| 0x1000;
23366 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23367 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23372 goto thumb_bl_common
;
23374 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23375 /* A bl from Thumb state ISA to an internal ARM state function
23376 is converted to a blx. */
23378 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23379 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23380 && ARM_IS_FUNC (fixP
->fx_addsy
)
23381 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23383 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23384 newval
= newval
& ~0x1000;
23385 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23386 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
23392 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23393 /* For a BLX instruction, make sure that the relocation is rounded up
23394 to a word boundary. This follows the semantics of the instruction
23395 which specifies that bit 1 of the target address will come from bit
23396 1 of the base address. */
23397 value
= (value
+ 3) & ~ 3;
23400 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
23401 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23402 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23405 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
23407 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
23408 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23409 else if ((value
& ~0x1ffffff)
23410 && ((value
& ~0x1ffffff) != ~0x1ffffff))
23411 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23412 _("Thumb2 branch out of range"));
23415 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23416 encode_thumb2_b_bl_offset (buf
, value
);
23420 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23421 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
23422 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23424 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23425 encode_thumb2_b_bl_offset (buf
, value
);
23430 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23435 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23436 md_number_to_chars (buf
, value
, 2);
23440 case BFD_RELOC_ARM_TLS_CALL
:
23441 case BFD_RELOC_ARM_THM_TLS_CALL
:
23442 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23443 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23444 case BFD_RELOC_ARM_TLS_GOTDESC
:
23445 case BFD_RELOC_ARM_TLS_GD32
:
23446 case BFD_RELOC_ARM_TLS_LE32
:
23447 case BFD_RELOC_ARM_TLS_IE32
:
23448 case BFD_RELOC_ARM_TLS_LDM32
:
23449 case BFD_RELOC_ARM_TLS_LDO32
:
23450 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
23453 case BFD_RELOC_ARM_GOT32
:
23454 case BFD_RELOC_ARM_GOTOFF
:
23457 case BFD_RELOC_ARM_GOT_PREL
:
23458 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23459 md_number_to_chars (buf
, value
, 4);
23462 case BFD_RELOC_ARM_TARGET2
:
23463 /* TARGET2 is not partial-inplace, so we need to write the
23464 addend here for REL targets, because it won't be written out
23465 during reloc processing later. */
23466 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23467 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
23471 case BFD_RELOC_RVA
:
23473 case BFD_RELOC_ARM_TARGET1
:
23474 case BFD_RELOC_ARM_ROSEGREL32
:
23475 case BFD_RELOC_ARM_SBREL32
:
23476 case BFD_RELOC_32_PCREL
:
23478 case BFD_RELOC_32_SECREL
:
23480 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23482 /* For WinCE we only do this for pcrel fixups. */
23483 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
23485 md_number_to_chars (buf
, value
, 4);
23489 case BFD_RELOC_ARM_PREL31
:
23490 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23492 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
23493 if ((value
^ (value
>> 1)) & 0x40000000)
23495 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23496 _("rel31 relocation overflow"));
23498 newval
|= value
& 0x7fffffff;
23499 md_number_to_chars (buf
, newval
, 4);
23504 case BFD_RELOC_ARM_CP_OFF_IMM
:
23505 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23506 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
23507 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23509 newval
= get_thumb32_insn (buf
);
23510 if ((newval
& 0x0f200f00) == 0x0d000900)
23512 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23513 has permitted values that are multiples of 2, in the range 0
23515 if (value
< -510 || value
> 510 || (value
& 1))
23516 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23517 _("co-processor offset out of range"));
23519 else if (value
< -1023 || value
> 1023 || (value
& 3))
23520 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23521 _("co-processor offset out of range"));
23526 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23527 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23528 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23530 newval
= get_thumb32_insn (buf
);
23532 newval
&= 0xffffff00;
23535 newval
&= 0xff7fff00;
23536 if ((newval
& 0x0f200f00) == 0x0d000900)
23538 /* This is a fp16 vstr/vldr.
23540 It requires the immediate offset in the instruction is shifted
23541 left by 1 to be a half-word offset.
23543 Here, left shift by 1 first, and later right shift by 2
23544 should get the right offset. */
23547 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
23549 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23550 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23551 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23553 put_thumb32_insn (buf
, newval
);
23556 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
23557 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
23558 if (value
< -255 || value
> 255)
23559 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23560 _("co-processor offset out of range"));
23562 goto cp_off_common
;
23564 case BFD_RELOC_ARM_THUMB_OFFSET
:
23565 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23566 /* Exactly what ranges, and where the offset is inserted depends
23567 on the type of instruction, we can establish this from the
23569 switch (newval
>> 12)
23571 case 4: /* PC load. */
23572 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23573 forced to zero for these loads; md_pcrel_from has already
23574 compensated for this. */
23576 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23577 _("invalid offset, target not word aligned (0x%08lX)"),
23578 (((unsigned long) fixP
->fx_frag
->fr_address
23579 + (unsigned long) fixP
->fx_where
) & ~3)
23580 + (unsigned long) value
);
23582 if (value
& ~0x3fc)
23583 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23584 _("invalid offset, value too big (0x%08lX)"),
23587 newval
|= value
>> 2;
23590 case 9: /* SP load/store. */
23591 if (value
& ~0x3fc)
23592 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23593 _("invalid offset, value too big (0x%08lX)"),
23595 newval
|= value
>> 2;
23598 case 6: /* Word load/store. */
23600 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23601 _("invalid offset, value too big (0x%08lX)"),
23603 newval
|= value
<< 4; /* 6 - 2. */
23606 case 7: /* Byte load/store. */
23608 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23609 _("invalid offset, value too big (0x%08lX)"),
23611 newval
|= value
<< 6;
23614 case 8: /* Halfword load/store. */
23616 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23617 _("invalid offset, value too big (0x%08lX)"),
23619 newval
|= value
<< 5; /* 6 - 1. */
23623 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23624 "Unable to process relocation for thumb opcode: %lx",
23625 (unsigned long) newval
);
23628 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23631 case BFD_RELOC_ARM_THUMB_ADD
:
23632 /* This is a complicated relocation, since we use it for all of
23633 the following immediate relocations:
23637 9bit ADD/SUB SP word-aligned
23638 10bit ADD PC/SP word-aligned
23640 The type of instruction being processed is encoded in the
23647 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23649 int rd
= (newval
>> 4) & 0xf;
23650 int rs
= newval
& 0xf;
23651 int subtract
= !!(newval
& 0x8000);
23653 /* Check for HI regs, only very restricted cases allowed:
23654 Adjusting SP, and using PC or SP to get an address. */
23655 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
23656 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
23657 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23658 _("invalid Hi register with immediate"));
23660 /* If value is negative, choose the opposite instruction. */
23664 subtract
= !subtract
;
23666 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23667 _("immediate value out of range"));
23672 if (value
& ~0x1fc)
23673 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23674 _("invalid immediate for stack address calculation"));
23675 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
23676 newval
|= value
>> 2;
23678 else if (rs
== REG_PC
|| rs
== REG_SP
)
23680 /* PR gas/18541. If the addition is for a defined symbol
23681 within range of an ADR instruction then accept it. */
23684 && fixP
->fx_addsy
!= NULL
)
23688 if (! S_IS_DEFINED (fixP
->fx_addsy
)
23689 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
23690 || S_IS_WEAK (fixP
->fx_addsy
))
23692 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23693 _("address calculation needs a strongly defined nearby symbol"));
23697 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23699 /* Round up to the next 4-byte boundary. */
23704 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
23708 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23709 _("symbol too far away"));
23719 if (subtract
|| value
& ~0x3fc)
23720 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23721 _("invalid immediate for address calculation (value = 0x%08lX)"),
23722 (unsigned long) (subtract
? - value
: value
));
23723 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
23725 newval
|= value
>> 2;
23730 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23731 _("immediate value out of range"));
23732 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
23733 newval
|= (rd
<< 8) | value
;
23738 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23739 _("immediate value out of range"));
23740 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
23741 newval
|= rd
| (rs
<< 3) | (value
<< 6);
23744 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23747 case BFD_RELOC_ARM_THUMB_IMM
:
23748 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23749 if (value
< 0 || value
> 255)
23750 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23751 _("invalid immediate: %ld is out of range"),
23754 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23757 case BFD_RELOC_ARM_THUMB_SHIFT
:
23758 /* 5bit shift value (0..32). LSL cannot take 32. */
23759 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
23760 temp
= newval
& 0xf800;
23761 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
23762 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23763 _("invalid shift value: %ld"), (long) value
);
23764 /* Shifts of zero must be encoded as LSL. */
23766 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
23767 /* Shifts of 32 are encoded as zero. */
23768 else if (value
== 32)
23770 newval
|= value
<< 6;
23771 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23774 case BFD_RELOC_VTABLE_INHERIT
:
23775 case BFD_RELOC_VTABLE_ENTRY
:
23779 case BFD_RELOC_ARM_MOVW
:
23780 case BFD_RELOC_ARM_MOVT
:
23781 case BFD_RELOC_ARM_THUMB_MOVW
:
23782 case BFD_RELOC_ARM_THUMB_MOVT
:
23783 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23785 /* REL format relocations are limited to a 16-bit addend. */
23786 if (!fixP
->fx_done
)
23788 if (value
< -0x8000 || value
> 0x7fff)
23789 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23790 _("offset out of range"));
23792 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
23793 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23798 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
23799 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23801 newval
= get_thumb32_insn (buf
);
23802 newval
&= 0xfbf08f00;
23803 newval
|= (value
& 0xf000) << 4;
23804 newval
|= (value
& 0x0800) << 15;
23805 newval
|= (value
& 0x0700) << 4;
23806 newval
|= (value
& 0x00ff);
23807 put_thumb32_insn (buf
, newval
);
23811 newval
= md_chars_to_number (buf
, 4);
23812 newval
&= 0xfff0f000;
23813 newval
|= value
& 0x0fff;
23814 newval
|= (value
& 0xf000) << 4;
23815 md_number_to_chars (buf
, newval
, 4);
23820 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23821 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23822 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23823 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23824 gas_assert (!fixP
->fx_done
);
23827 bfd_boolean is_mov
;
23828 bfd_vma encoded_addend
= value
;
23830 /* Check that addend can be encoded in instruction. */
23831 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
23832 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23833 _("the offset 0x%08lX is not representable"),
23834 (unsigned long) encoded_addend
);
23836 /* Extract the instruction. */
23837 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
23838 is_mov
= (insn
& 0xf800) == 0x2000;
23843 if (!seg
->use_rela_p
)
23844 insn
|= encoded_addend
;
23850 /* Extract the instruction. */
23851 /* Encoding is the following
23856 /* The following conditions must be true :
23861 rd
= (insn
>> 4) & 0xf;
23863 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
23864 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23865 _("Unable to process relocation for thumb opcode: %lx"),
23866 (unsigned long) insn
);
23868 /* Encode as ADD immediate8 thumb 1 code. */
23869 insn
= 0x3000 | (rd
<< 8);
23871 /* Place the encoded addend into the first 8 bits of the
23873 if (!seg
->use_rela_p
)
23874 insn
|= encoded_addend
;
23877 /* Update the instruction. */
23878 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
23882 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23883 case BFD_RELOC_ARM_ALU_PC_G0
:
23884 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23885 case BFD_RELOC_ARM_ALU_PC_G1
:
23886 case BFD_RELOC_ARM_ALU_PC_G2
:
23887 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23888 case BFD_RELOC_ARM_ALU_SB_G0
:
23889 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23890 case BFD_RELOC_ARM_ALU_SB_G1
:
23891 case BFD_RELOC_ARM_ALU_SB_G2
:
23892 gas_assert (!fixP
->fx_done
);
23893 if (!seg
->use_rela_p
)
23896 bfd_vma encoded_addend
;
23897 bfd_vma addend_abs
= abs (value
);
23899 /* Check that the absolute value of the addend can be
23900 expressed as an 8-bit constant plus a rotation. */
23901 encoded_addend
= encode_arm_immediate (addend_abs
);
23902 if (encoded_addend
== (unsigned int) FAIL
)
23903 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23904 _("the offset 0x%08lX is not representable"),
23905 (unsigned long) addend_abs
);
23907 /* Extract the instruction. */
23908 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23910 /* If the addend is positive, use an ADD instruction.
23911 Otherwise use a SUB. Take care not to destroy the S bit. */
23912 insn
&= 0xff1fffff;
23918 /* Place the encoded addend into the first 12 bits of the
23920 insn
&= 0xfffff000;
23921 insn
|= encoded_addend
;
23923 /* Update the instruction. */
23924 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23928 case BFD_RELOC_ARM_LDR_PC_G0
:
23929 case BFD_RELOC_ARM_LDR_PC_G1
:
23930 case BFD_RELOC_ARM_LDR_PC_G2
:
23931 case BFD_RELOC_ARM_LDR_SB_G0
:
23932 case BFD_RELOC_ARM_LDR_SB_G1
:
23933 case BFD_RELOC_ARM_LDR_SB_G2
:
23934 gas_assert (!fixP
->fx_done
);
23935 if (!seg
->use_rela_p
)
23938 bfd_vma addend_abs
= abs (value
);
23940 /* Check that the absolute value of the addend can be
23941 encoded in 12 bits. */
23942 if (addend_abs
>= 0x1000)
23943 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23944 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23945 (unsigned long) addend_abs
);
23947 /* Extract the instruction. */
23948 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23950 /* If the addend is negative, clear bit 23 of the instruction.
23951 Otherwise set it. */
23953 insn
&= ~(1 << 23);
23957 /* Place the absolute value of the addend into the first 12 bits
23958 of the instruction. */
23959 insn
&= 0xfffff000;
23960 insn
|= addend_abs
;
23962 /* Update the instruction. */
23963 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23967 case BFD_RELOC_ARM_LDRS_PC_G0
:
23968 case BFD_RELOC_ARM_LDRS_PC_G1
:
23969 case BFD_RELOC_ARM_LDRS_PC_G2
:
23970 case BFD_RELOC_ARM_LDRS_SB_G0
:
23971 case BFD_RELOC_ARM_LDRS_SB_G1
:
23972 case BFD_RELOC_ARM_LDRS_SB_G2
:
23973 gas_assert (!fixP
->fx_done
);
23974 if (!seg
->use_rela_p
)
23977 bfd_vma addend_abs
= abs (value
);
23979 /* Check that the absolute value of the addend can be
23980 encoded in 8 bits. */
23981 if (addend_abs
>= 0x100)
23982 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23983 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23984 (unsigned long) addend_abs
);
23986 /* Extract the instruction. */
23987 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23989 /* If the addend is negative, clear bit 23 of the instruction.
23990 Otherwise set it. */
23992 insn
&= ~(1 << 23);
23996 /* Place the first four bits of the absolute value of the addend
23997 into the first 4 bits of the instruction, and the remaining
23998 four into bits 8 .. 11. */
23999 insn
&= 0xfffff0f0;
24000 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
24002 /* Update the instruction. */
24003 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24007 case BFD_RELOC_ARM_LDC_PC_G0
:
24008 case BFD_RELOC_ARM_LDC_PC_G1
:
24009 case BFD_RELOC_ARM_LDC_PC_G2
:
24010 case BFD_RELOC_ARM_LDC_SB_G0
:
24011 case BFD_RELOC_ARM_LDC_SB_G1
:
24012 case BFD_RELOC_ARM_LDC_SB_G2
:
24013 gas_assert (!fixP
->fx_done
);
24014 if (!seg
->use_rela_p
)
24017 bfd_vma addend_abs
= abs (value
);
24019 /* Check that the absolute value of the addend is a multiple of
24020 four and, when divided by four, fits in 8 bits. */
24021 if (addend_abs
& 0x3)
24022 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24023 _("bad offset 0x%08lX (must be word-aligned)"),
24024 (unsigned long) addend_abs
);
24026 if ((addend_abs
>> 2) > 0xff)
24027 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24028 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24029 (unsigned long) addend_abs
);
24031 /* Extract the instruction. */
24032 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24034 /* If the addend is negative, clear bit 23 of the instruction.
24035 Otherwise set it. */
24037 insn
&= ~(1 << 23);
24041 /* Place the addend (divided by four) into the first eight
24042 bits of the instruction. */
24043 insn
&= 0xfffffff0;
24044 insn
|= addend_abs
>> 2;
24046 /* Update the instruction. */
24047 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24051 case BFD_RELOC_ARM_V4BX
:
24052 /* This will need to go in the object file. */
24056 case BFD_RELOC_UNUSED
:
24058 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24059 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
24063 /* Translate internal representation of relocation info to BFD target
24067 tc_gen_reloc (asection
*section
, fixS
*fixp
)
24070 bfd_reloc_code_real_type code
;
24072 reloc
= XNEW (arelent
);
24074 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
24075 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
24076 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
24078 if (fixp
->fx_pcrel
)
24080 if (section
->use_rela_p
)
24081 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
24083 fixp
->fx_offset
= reloc
->address
;
24085 reloc
->addend
= fixp
->fx_offset
;
24087 switch (fixp
->fx_r_type
)
24090 if (fixp
->fx_pcrel
)
24092 code
= BFD_RELOC_8_PCREL
;
24097 if (fixp
->fx_pcrel
)
24099 code
= BFD_RELOC_16_PCREL
;
24104 if (fixp
->fx_pcrel
)
24106 code
= BFD_RELOC_32_PCREL
;
24110 case BFD_RELOC_ARM_MOVW
:
24111 if (fixp
->fx_pcrel
)
24113 code
= BFD_RELOC_ARM_MOVW_PCREL
;
24117 case BFD_RELOC_ARM_MOVT
:
24118 if (fixp
->fx_pcrel
)
24120 code
= BFD_RELOC_ARM_MOVT_PCREL
;
24124 case BFD_RELOC_ARM_THUMB_MOVW
:
24125 if (fixp
->fx_pcrel
)
24127 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
24131 case BFD_RELOC_ARM_THUMB_MOVT
:
24132 if (fixp
->fx_pcrel
)
24134 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
24138 case BFD_RELOC_NONE
:
24139 case BFD_RELOC_ARM_PCREL_BRANCH
:
24140 case BFD_RELOC_ARM_PCREL_BLX
:
24141 case BFD_RELOC_RVA
:
24142 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
24143 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
24144 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
24145 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24146 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24147 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24148 case BFD_RELOC_VTABLE_ENTRY
:
24149 case BFD_RELOC_VTABLE_INHERIT
:
24151 case BFD_RELOC_32_SECREL
:
24153 code
= fixp
->fx_r_type
;
24156 case BFD_RELOC_THUMB_PCREL_BLX
:
24158 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
24159 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24162 code
= BFD_RELOC_THUMB_PCREL_BLX
;
24165 case BFD_RELOC_ARM_LITERAL
:
24166 case BFD_RELOC_ARM_HWLITERAL
:
24167 /* If this is called then the a literal has
24168 been referenced across a section boundary. */
24169 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24170 _("literal referenced across section boundary"));
24174 case BFD_RELOC_ARM_TLS_CALL
:
24175 case BFD_RELOC_ARM_THM_TLS_CALL
:
24176 case BFD_RELOC_ARM_TLS_DESCSEQ
:
24177 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
24178 case BFD_RELOC_ARM_GOT32
:
24179 case BFD_RELOC_ARM_GOTOFF
:
24180 case BFD_RELOC_ARM_GOT_PREL
:
24181 case BFD_RELOC_ARM_PLT32
:
24182 case BFD_RELOC_ARM_TARGET1
:
24183 case BFD_RELOC_ARM_ROSEGREL32
:
24184 case BFD_RELOC_ARM_SBREL32
:
24185 case BFD_RELOC_ARM_PREL31
:
24186 case BFD_RELOC_ARM_TARGET2
:
24187 case BFD_RELOC_ARM_TLS_LDO32
:
24188 case BFD_RELOC_ARM_PCREL_CALL
:
24189 case BFD_RELOC_ARM_PCREL_JUMP
:
24190 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
24191 case BFD_RELOC_ARM_ALU_PC_G0
:
24192 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
24193 case BFD_RELOC_ARM_ALU_PC_G1
:
24194 case BFD_RELOC_ARM_ALU_PC_G2
:
24195 case BFD_RELOC_ARM_LDR_PC_G0
:
24196 case BFD_RELOC_ARM_LDR_PC_G1
:
24197 case BFD_RELOC_ARM_LDR_PC_G2
:
24198 case BFD_RELOC_ARM_LDRS_PC_G0
:
24199 case BFD_RELOC_ARM_LDRS_PC_G1
:
24200 case BFD_RELOC_ARM_LDRS_PC_G2
:
24201 case BFD_RELOC_ARM_LDC_PC_G0
:
24202 case BFD_RELOC_ARM_LDC_PC_G1
:
24203 case BFD_RELOC_ARM_LDC_PC_G2
:
24204 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
24205 case BFD_RELOC_ARM_ALU_SB_G0
:
24206 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
24207 case BFD_RELOC_ARM_ALU_SB_G1
:
24208 case BFD_RELOC_ARM_ALU_SB_G2
:
24209 case BFD_RELOC_ARM_LDR_SB_G0
:
24210 case BFD_RELOC_ARM_LDR_SB_G1
:
24211 case BFD_RELOC_ARM_LDR_SB_G2
:
24212 case BFD_RELOC_ARM_LDRS_SB_G0
:
24213 case BFD_RELOC_ARM_LDRS_SB_G1
:
24214 case BFD_RELOC_ARM_LDRS_SB_G2
:
24215 case BFD_RELOC_ARM_LDC_SB_G0
:
24216 case BFD_RELOC_ARM_LDC_SB_G1
:
24217 case BFD_RELOC_ARM_LDC_SB_G2
:
24218 case BFD_RELOC_ARM_V4BX
:
24219 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24220 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24221 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24222 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24223 code
= fixp
->fx_r_type
;
24226 case BFD_RELOC_ARM_TLS_GOTDESC
:
24227 case BFD_RELOC_ARM_TLS_GD32
:
24228 case BFD_RELOC_ARM_TLS_LE32
:
24229 case BFD_RELOC_ARM_TLS_IE32
:
24230 case BFD_RELOC_ARM_TLS_LDM32
:
24231 /* BFD will include the symbol's address in the addend.
24232 But we don't want that, so subtract it out again here. */
24233 if (!S_IS_COMMON (fixp
->fx_addsy
))
24234 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
24235 code
= fixp
->fx_r_type
;
24239 case BFD_RELOC_ARM_IMMEDIATE
:
24240 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24241 _("internal relocation (type: IMMEDIATE) not fixed up"));
24244 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
24245 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24246 _("ADRL used for a symbol not defined in the same file"));
24249 case BFD_RELOC_ARM_OFFSET_IMM
:
24250 if (section
->use_rela_p
)
24252 code
= fixp
->fx_r_type
;
24256 if (fixp
->fx_addsy
!= NULL
24257 && !S_IS_DEFINED (fixp
->fx_addsy
)
24258 && S_IS_LOCAL (fixp
->fx_addsy
))
24260 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24261 _("undefined local label `%s'"),
24262 S_GET_NAME (fixp
->fx_addsy
));
24266 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24267 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
24274 switch (fixp
->fx_r_type
)
24276 case BFD_RELOC_NONE
: type
= "NONE"; break;
24277 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
24278 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
24279 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
24280 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
24281 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
24282 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
24283 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
24284 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
24285 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
24286 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
24287 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
24288 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
24289 default: type
= _("<unknown>"); break;
24291 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24292 _("cannot represent %s relocation in this object file format"),
24299 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
24301 && fixp
->fx_addsy
== GOT_symbol
)
24303 code
= BFD_RELOC_ARM_GOTPC
;
24304 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
24308 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
24310 if (reloc
->howto
== NULL
)
24312 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24313 _("cannot represent %s relocation in this object file format"),
24314 bfd_get_reloc_code_name (code
));
24318 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
24319 vtable entry to be used in the relocation's section offset. */
24320 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24321 reloc
->address
= fixp
->fx_offset
;
24326 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24329 cons_fix_new_arm (fragS
* frag
,
24333 bfd_reloc_code_real_type reloc
)
24338 FIXME: @@ Should look at CPU word size. */
24342 reloc
= BFD_RELOC_8
;
24345 reloc
= BFD_RELOC_16
;
24349 reloc
= BFD_RELOC_32
;
24352 reloc
= BFD_RELOC_64
;
24357 if (exp
->X_op
== O_secrel
)
24359 exp
->X_op
= O_symbol
;
24360 reloc
= BFD_RELOC_32_SECREL
;
24364 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
24367 #if defined (OBJ_COFF)
24369 arm_validate_fix (fixS
* fixP
)
24371 /* If the destination of the branch is a defined symbol which does not have
24372 the THUMB_FUNC attribute, then we must be calling a function which has
24373 the (interfacearm) attribute. We look for the Thumb entry point to that
24374 function and change the branch to refer to that function instead. */
24375 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
24376 && fixP
->fx_addsy
!= NULL
24377 && S_IS_DEFINED (fixP
->fx_addsy
)
24378 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
24380 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
24387 arm_force_relocation (struct fix
* fixp
)
24389 #if defined (OBJ_COFF) && defined (TE_PE)
24390 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
24394 /* In case we have a call or a branch to a function in ARM ISA mode from
24395 a thumb function or vice-versa force the relocation. These relocations
24396 are cleared off for some cores that might have blx and simple transformations
24400 switch (fixp
->fx_r_type
)
24402 case BFD_RELOC_ARM_PCREL_JUMP
:
24403 case BFD_RELOC_ARM_PCREL_CALL
:
24404 case BFD_RELOC_THUMB_PCREL_BLX
:
24405 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
24409 case BFD_RELOC_ARM_PCREL_BLX
:
24410 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24411 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24412 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24413 if (ARM_IS_FUNC (fixp
->fx_addsy
))
24422 /* Resolve these relocations even if the symbol is extern or weak.
24423 Technically this is probably wrong due to symbol preemption.
24424 In practice these relocations do not have enough range to be useful
24425 at dynamic link time, and some code (e.g. in the Linux kernel)
24426 expects these references to be resolved. */
24427 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
24428 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
24429 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
24430 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
24431 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24432 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
24433 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
24434 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
24435 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24436 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
24437 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
24438 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
24439 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
24440 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
24443 /* Always leave these relocations for the linker. */
24444 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24445 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24446 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24449 /* Always generate relocations against function symbols. */
24450 if (fixp
->fx_r_type
== BFD_RELOC_32
24452 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
24455 return generic_force_reloc (fixp
);
24458 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24459 /* Relocations against function names must be left unadjusted,
24460 so that the linker can use this information to generate interworking
24461 stubs. The MIPS version of this function
24462 also prevents relocations that are mips-16 specific, but I do not
24463 know why it does this.
24466 There is one other problem that ought to be addressed here, but
24467 which currently is not: Taking the address of a label (rather
24468 than a function) and then later jumping to that address. Such
24469 addresses also ought to have their bottom bit set (assuming that
24470 they reside in Thumb code), but at the moment they will not. */
24473 arm_fix_adjustable (fixS
* fixP
)
24475 if (fixP
->fx_addsy
== NULL
)
24478 /* Preserve relocations against symbols with function type. */
24479 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
24482 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
24483 && fixP
->fx_subsy
== NULL
)
24486 /* We need the symbol name for the VTABLE entries. */
24487 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
24488 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24491 /* Don't allow symbols to be discarded on GOT related relocs. */
24492 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
24493 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
24494 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
24495 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
24496 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
24497 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
24498 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
24499 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
24500 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
24501 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
24502 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
24503 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
24504 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
24505 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
24508 /* Similarly for group relocations. */
24509 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24510 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24511 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24514 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24515 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
24516 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24517 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
24518 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
24519 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24520 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
24521 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
24522 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
24525 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24526 offsets, so keep these symbols. */
24527 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24528 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
24533 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24537 elf32_arm_target_format (void)
24540 return (target_big_endian
24541 ? "elf32-bigarm-symbian"
24542 : "elf32-littlearm-symbian");
24543 #elif defined (TE_VXWORKS)
24544 return (target_big_endian
24545 ? "elf32-bigarm-vxworks"
24546 : "elf32-littlearm-vxworks");
24547 #elif defined (TE_NACL)
24548 return (target_big_endian
24549 ? "elf32-bigarm-nacl"
24550 : "elf32-littlearm-nacl");
24552 if (target_big_endian
)
24553 return "elf32-bigarm";
24555 return "elf32-littlearm";
24560 armelf_frob_symbol (symbolS
* symp
,
24563 elf_frob_symbol (symp
, puntp
);
24567 /* MD interface: Finalization. */
24572 literal_pool
* pool
;
24574 /* Ensure that all the IT blocks are properly closed. */
24575 check_it_blocks_finished ();
24577 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
24579 /* Put it at the end of the relevant section. */
24580 subseg_set (pool
->section
, pool
->sub_section
);
24582 arm_elf_change_section ();
24589 /* Remove any excess mapping symbols generated for alignment frags in
24590 SEC. We may have created a mapping symbol before a zero byte
24591 alignment; remove it if there's a mapping symbol after the
24594 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
24595 void *dummy ATTRIBUTE_UNUSED
)
24597 segment_info_type
*seginfo
= seg_info (sec
);
24600 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
24603 for (fragp
= seginfo
->frchainP
->frch_root
;
24605 fragp
= fragp
->fr_next
)
24607 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
24608 fragS
*next
= fragp
->fr_next
;
24610 /* Variable-sized frags have been converted to fixed size by
24611 this point. But if this was variable-sized to start with,
24612 there will be a fixed-size frag after it. So don't handle
24614 if (sym
== NULL
|| next
== NULL
)
24617 if (S_GET_VALUE (sym
) < next
->fr_address
)
24618 /* Not at the end of this frag. */
24620 know (S_GET_VALUE (sym
) == next
->fr_address
);
24624 if (next
->tc_frag_data
.first_map
!= NULL
)
24626 /* Next frag starts with a mapping symbol. Discard this
24628 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24632 if (next
->fr_next
== NULL
)
24634 /* This mapping symbol is at the end of the section. Discard
24636 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
24637 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24641 /* As long as we have empty frags without any mapping symbols,
24643 /* If the next frag is non-empty and does not start with a
24644 mapping symbol, then this mapping symbol is required. */
24645 if (next
->fr_address
!= next
->fr_next
->fr_address
)
24648 next
= next
->fr_next
;
24650 while (next
!= NULL
);
24655 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24659 arm_adjust_symtab (void)
24664 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24666 if (ARM_IS_THUMB (sym
))
24668 if (THUMB_IS_FUNC (sym
))
24670 /* Mark the symbol as a Thumb function. */
24671 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
24672 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
24673 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
24675 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
24676 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
24678 as_bad (_("%s: unexpected function type: %d"),
24679 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
24681 else switch (S_GET_STORAGE_CLASS (sym
))
24684 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
24687 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
24690 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
24698 if (ARM_IS_INTERWORK (sym
))
24699 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
24706 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24708 if (ARM_IS_THUMB (sym
))
24710 elf_symbol_type
* elf_sym
;
24712 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
24713 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
24715 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
24716 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
24718 /* If it's a .thumb_func, declare it as so,
24719 otherwise tag label as .code 16. */
24720 if (THUMB_IS_FUNC (sym
))
24721 ARM_SET_SYM_BRANCH_TYPE (elf_sym
->internal_elf_sym
.st_target_internal
,
24722 ST_BRANCH_TO_THUMB
);
24723 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
24724 elf_sym
->internal_elf_sym
.st_info
=
24725 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
24730 /* Remove any overlapping mapping symbols generated by alignment frags. */
24731 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
24732 /* Now do generic ELF adjustments. */
24733 elf_adjust_symtab ();
24737 /* MD interface: Initialization. */
24740 set_constant_flonums (void)
24744 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
24745 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
24749 /* Auto-select Thumb mode if it's the only available instruction set for the
24750 given architecture. */
24753 autoselect_thumb_from_cpu_variant (void)
24755 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
24756 opcode_select (16);
24765 if ( (arm_ops_hsh
= hash_new ()) == NULL
24766 || (arm_cond_hsh
= hash_new ()) == NULL
24767 || (arm_shift_hsh
= hash_new ()) == NULL
24768 || (arm_psr_hsh
= hash_new ()) == NULL
24769 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
24770 || (arm_reg_hsh
= hash_new ()) == NULL
24771 || (arm_reloc_hsh
= hash_new ()) == NULL
24772 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
24773 as_fatal (_("virtual memory exhausted"));
24775 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
24776 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
24777 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
24778 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
24779 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
24780 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
24781 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
24782 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
24783 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
24784 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
24785 (void *) (v7m_psrs
+ i
));
24786 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
24787 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
24789 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
24791 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
24792 (void *) (barrier_opt_names
+ i
));
24794 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
24796 struct reloc_entry
* entry
= reloc_names
+ i
;
24798 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
24799 /* This makes encode_branch() use the EABI versions of this relocation. */
24800 entry
->reloc
= BFD_RELOC_UNUSED
;
24802 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
24806 set_constant_flonums ();
24808 /* Set the cpu variant based on the command-line options. We prefer
24809 -mcpu= over -march= if both are set (as for GCC); and we prefer
24810 -mfpu= over any other way of setting the floating point unit.
24811 Use of legacy options with new options are faulted. */
24814 if (mcpu_cpu_opt
|| march_cpu_opt
)
24815 as_bad (_("use of old and new-style options to set CPU type"));
24817 mcpu_cpu_opt
= legacy_cpu
;
24819 else if (!mcpu_cpu_opt
)
24820 mcpu_cpu_opt
= march_cpu_opt
;
24825 as_bad (_("use of old and new-style options to set FPU type"));
24827 mfpu_opt
= legacy_fpu
;
24829 else if (!mfpu_opt
)
24831 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24832 || defined (TE_NetBSD) || defined (TE_VXWORKS))
24833 /* Some environments specify a default FPU. If they don't, infer it
24834 from the processor. */
24836 mfpu_opt
= mcpu_fpu_opt
;
24838 mfpu_opt
= march_fpu_opt
;
24840 mfpu_opt
= &fpu_default
;
24846 if (mcpu_cpu_opt
!= NULL
)
24847 mfpu_opt
= &fpu_default
;
24848 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
24849 mfpu_opt
= &fpu_arch_vfp_v2
;
24851 mfpu_opt
= &fpu_arch_fpa
;
24857 mcpu_cpu_opt
= &cpu_default
;
24858 selected_cpu
= cpu_default
;
24860 else if (no_cpu_selected ())
24861 selected_cpu
= cpu_default
;
24864 selected_cpu
= *mcpu_cpu_opt
;
24866 mcpu_cpu_opt
= &arm_arch_any
;
24869 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
24871 autoselect_thumb_from_cpu_variant ();
24873 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
24875 #if defined OBJ_COFF || defined OBJ_ELF
24877 unsigned int flags
= 0;
24879 #if defined OBJ_ELF
24880 flags
= meabi_flags
;
24882 switch (meabi_flags
)
24884 case EF_ARM_EABI_UNKNOWN
:
24886 /* Set the flags in the private structure. */
24887 if (uses_apcs_26
) flags
|= F_APCS26
;
24888 if (support_interwork
) flags
|= F_INTERWORK
;
24889 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
24890 if (pic_code
) flags
|= F_PIC
;
24891 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
24892 flags
|= F_SOFT_FLOAT
;
24894 switch (mfloat_abi_opt
)
24896 case ARM_FLOAT_ABI_SOFT
:
24897 case ARM_FLOAT_ABI_SOFTFP
:
24898 flags
|= F_SOFT_FLOAT
;
24901 case ARM_FLOAT_ABI_HARD
:
24902 if (flags
& F_SOFT_FLOAT
)
24903 as_bad (_("hard-float conflicts with specified fpu"));
24907 /* Using pure-endian doubles (even if soft-float). */
24908 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
24909 flags
|= F_VFP_FLOAT
;
24911 #if defined OBJ_ELF
24912 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
24913 flags
|= EF_ARM_MAVERICK_FLOAT
;
24916 case EF_ARM_EABI_VER4
:
24917 case EF_ARM_EABI_VER5
:
24918 /* No additional flags to set. */
24925 bfd_set_private_flags (stdoutput
, flags
);
24927 /* We have run out flags in the COFF header to encode the
24928 status of ATPCS support, so instead we create a dummy,
24929 empty, debug section called .arm.atpcs. */
24934 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
24938 bfd_set_section_flags
24939 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
24940 bfd_set_section_size (stdoutput
, sec
, 0);
24941 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
24947 /* Record the CPU type as well. */
24948 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
24949 mach
= bfd_mach_arm_iWMMXt2
;
24950 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
24951 mach
= bfd_mach_arm_iWMMXt
;
24952 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
24953 mach
= bfd_mach_arm_XScale
;
24954 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
24955 mach
= bfd_mach_arm_ep9312
;
24956 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
24957 mach
= bfd_mach_arm_5TE
;
24958 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
24960 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24961 mach
= bfd_mach_arm_5T
;
24963 mach
= bfd_mach_arm_5
;
24965 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
24967 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24968 mach
= bfd_mach_arm_4T
;
24970 mach
= bfd_mach_arm_4
;
24972 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
24973 mach
= bfd_mach_arm_3M
;
24974 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
24975 mach
= bfd_mach_arm_3
;
24976 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
24977 mach
= bfd_mach_arm_2a
;
24978 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
24979 mach
= bfd_mach_arm_2
;
24981 mach
= bfd_mach_arm_unknown
;
24983 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
24986 /* Command line processing. */
24989 Invocation line includes a switch not recognized by the base assembler.
24990 See if it's a processor-specific option.
24992 This routine is somewhat complicated by the need for backwards
24993 compatibility (since older releases of gcc can't be changed).
24994 The new options try to make the interface as compatible as
24997 New options (supported) are:
24999 -mcpu=<cpu name> Assemble for selected processor
25000 -march=<architecture name> Assemble for selected architecture
25001 -mfpu=<fpu architecture> Assemble for selected FPU.
25002 -EB/-mbig-endian Big-endian
25003 -EL/-mlittle-endian Little-endian
25004 -k Generate PIC code
25005 -mthumb Start in Thumb mode
25006 -mthumb-interwork Code supports ARM/Thumb interworking
25008 -m[no-]warn-deprecated Warn about deprecated features
25009 -m[no-]warn-syms Warn when symbols match instructions
25011 For now we will also provide support for:
25013 -mapcs-32 32-bit Program counter
25014 -mapcs-26 26-bit Program counter
25015 -mapcs-float Floats passed in FP registers
25016 -mapcs-reentrant Reentrant code
25018 (sometime these will probably be replaced with -mapcs=<list of options>
25019 and -matpcs=<list of options>)
25021 The remaining options are only supported for back-wards compatibility.
25022 Cpu variants, the arm part is optional:
25023 -m[arm]1 Currently not supported.
25024 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25025 -m[arm]3 Arm 3 processor
25026 -m[arm]6[xx], Arm 6 processors
25027 -m[arm]7[xx][t][[d]m] Arm 7 processors
25028 -m[arm]8[10] Arm 8 processors
25029 -m[arm]9[20][tdmi] Arm 9 processors
25030 -mstrongarm[110[0]] StrongARM processors
25031 -mxscale XScale processors
25032 -m[arm]v[2345[t[e]]] Arm architectures
25033 -mall All (except the ARM1)
25035 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25036 -mfpe-old (No float load/store multiples)
25037 -mvfpxd VFP Single precision
25039 -mno-fpu Disable all floating point instructions
25041 The following CPU names are recognized:
25042 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25043 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25044 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25045 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25046 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25047 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25048 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
/* Single-character options understood by this target ("-m<arg>" takes an
   argument, "-k" requests PIC code).  */
const char * md_shortopts = "m:k";
/* Option codes for getopt_long.  For a bi-endian assembler both -EB and -EL
   exist; otherwise only the flag matching the fixed target byte order is
   defined.  (The extraction had dropped the #else/#endif lines; restored.)  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* -fix-v4bx is always available.  */
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25066 struct option md_longopts
[] =
25069 {"EB", no_argument
, NULL
, OPTION_EB
},
25072 {"EL", no_argument
, NULL
, OPTION_EL
},
25074 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
25075 {NULL
, no_argument
, NULL
, 0}
25079 size_t md_longopts_size
= sizeof (md_longopts
);
/* One entry of the simple boolean-option table (arm_opts below): matching
   OPTION sets *VAR to VALUE; DEPRECATED, when non-null, is printed as a
   warning.  */
struct arm_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
25090 struct arm_option_table arm_opts
[] =
25092 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
25093 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
25094 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25095 &support_interwork
, 1, NULL
},
25096 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
25097 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
25098 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
25100 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
25101 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
25102 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
25103 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
25106 /* These are recognized by the assembler, but have no affect on code. */
25107 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
25108 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
25110 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
25111 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25112 &warn_on_deprecated
, 0, NULL
},
25113 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
25114 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
25115 {NULL
, NULL
, NULL
, 0, NULL
}
25118 struct arm_legacy_option_table
25120 const char *option
; /* Option name to match. */
25121 const arm_feature_set
**var
; /* Variable to change. */
25122 const arm_feature_set value
; /* What to change it to. */
25123 const char *deprecated
; /* If non-null, print this message. */
25126 const struct arm_legacy_option_table arm_legacy_opts
[] =
25128 /* DON'T add any new processors to this list -- we want the whole list
25129 to go away... Add them to the processors table instead. */
25130 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
25131 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
25132 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
25133 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
25134 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
25135 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
25136 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
25137 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
25138 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
25139 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
25140 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
25141 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
25142 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
25143 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
25144 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
25145 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
25146 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
25147 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
25148 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
25149 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
25150 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
25151 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
25152 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
25153 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
25154 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
25155 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
25156 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
25157 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
25158 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
25159 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
25160 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
25161 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
25162 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
25163 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
25164 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
25165 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
25166 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
25167 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
25168 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
25169 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
25170 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
25171 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
25172 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
25173 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
25174 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25175 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25176 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25177 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25178 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25179 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25180 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25181 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25182 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25183 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25184 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25185 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25186 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25187 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25188 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25189 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25190 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25191 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25192 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25193 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25194 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25195 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25196 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25197 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25198 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
25199 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
25200 N_("use -mcpu=strongarm110")},
25201 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
25202 N_("use -mcpu=strongarm1100")},
25203 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
25204 N_("use -mcpu=strongarm1110")},
25205 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
25206 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
25207 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
25209 /* Architecture variants -- don't add any more to this list either. */
25210 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25211 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25212 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25213 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25214 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25215 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25216 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25217 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25218 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25219 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25220 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25221 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25222 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25223 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25224 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25225 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25226 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25227 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25229 /* Floating point variants -- don't add any more to this list either. */
25230 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
25231 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
25232 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
25233 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
25234 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
25236 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
25239 struct arm_cpu_option_table
25243 const arm_feature_set value
;
25244 /* For some CPUs we assume an FPU unless the user explicitly sets
25246 const arm_feature_set default_fpu
;
25247 /* The canonical name of the CPU, or NULL to use NAME converted to upper
25249 const char *canonical_name
;
25252 /* This list should, at a minimum, contain all the cpu names
25253 recognized by GCC. */
25254 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
25255 static const struct arm_cpu_option_table arm_cpus
[] =
25257 ARM_CPU_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
, NULL
),
25258 ARM_CPU_OPT ("arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
),
25259 ARM_CPU_OPT ("arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
),
25260 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
25261 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
25262 ARM_CPU_OPT ("arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25263 ARM_CPU_OPT ("arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25264 ARM_CPU_OPT ("arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25265 ARM_CPU_OPT ("arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25266 ARM_CPU_OPT ("arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25267 ARM_CPU_OPT ("arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25268 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25269 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25270 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25271 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25272 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
25273 ARM_CPU_OPT ("arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25274 ARM_CPU_OPT ("arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25275 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25276 ARM_CPU_OPT ("arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25277 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25278 ARM_CPU_OPT ("arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25279 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25280 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25281 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25282 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25283 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25284 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
25285 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25286 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25287 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25288 ARM_CPU_OPT ("arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25289 ARM_CPU_OPT ("arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25290 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25291 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25292 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25293 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25294 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25295 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25296 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"),
25297 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25298 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25299 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25300 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
25301 ARM_CPU_OPT ("fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25302 ARM_CPU_OPT ("fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
25303 /* For V5 or later processors we default to using VFP; but the user
25304 should really set the FPU type explicitly. */
25305 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25306 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25307 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
25308 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
25309 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
25310 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25311 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"),
25312 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25313 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
25314 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"),
25315 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25316 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25317 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25318 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25319 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25320 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"),
25321 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
25322 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25323 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25324 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
,
25326 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
25327 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25328 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25329 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25330 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25331 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
25332 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"),
25333 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
),
25334 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
,
25336 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
),
25337 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, "MPCore"),
25338 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, "MPCore"),
25339 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
),
25340 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
),
25341 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6KZ
, FPU_NONE
, NULL
),
25342 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6KZ
, FPU_ARCH_VFP_V2
, NULL
),
25343 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC
,
25344 FPU_NONE
, "Cortex-A5"),
25345 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25347 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC
,
25348 ARM_FEATURE_COPROC (FPU_VFP_V3
25349 | FPU_NEON_EXT_V1
),
25351 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC
,
25352 ARM_FEATURE_COPROC (FPU_VFP_V3
25353 | FPU_NEON_EXT_V1
),
25355 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25357 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25359 ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
25361 ARM_CPU_OPT ("cortex-a32", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25363 ARM_CPU_OPT ("cortex-a35", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25365 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25367 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25369 ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25371 ARM_CPU_OPT ("cortex-a73", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25373 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, "Cortex-R4"),
25374 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R
, FPU_ARCH_VFP_V3D16
,
25376 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV
,
25377 FPU_NONE
, "Cortex-R5"),
25378 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV
,
25379 FPU_ARCH_VFP_V3D16
,
25381 ARM_CPU_OPT ("cortex-r8", ARM_ARCH_V7R_IDIV
,
25382 FPU_ARCH_VFP_V3D16
,
25384 ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M7"),
25385 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M4"),
25386 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, "Cortex-M3"),
25387 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M1"),
25388 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0"),
25389 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0+"),
25390 ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25393 ARM_CPU_OPT ("qdf24xx", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25397 /* ??? XSCALE is really an architecture. */
25398 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
25399 /* ??? iwmmxt is not a processor. */
25400 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
),
25401 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
),
25402 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
25404 ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
25405 FPU_ARCH_MAVERICK
, "ARM920T"),
25406 /* Marvell processors. */
25407 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
25409 ARM_EXT2_V6T2_V8M
),
25410 FPU_ARCH_VFP_V3D16
, NULL
),
25411 ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
25413 ARM_EXT2_V6T2_V8M
),
25414 FPU_ARCH_NEON_VFP_V4
, NULL
),
25415 /* APM X-Gene family. */
25416 ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25418 ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A_CRC
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25421 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
25425 struct arm_arch_option_table
25429 const arm_feature_set value
;
25430 const arm_feature_set default_fpu
;
25433 /* This list should, at a minimum, contain all the architecture names
25434 recognized by GCC. */
25435 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
25436 static const struct arm_arch_option_table arm_archs
[] =
25438 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
25439 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
25440 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
25441 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25442 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25443 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
25444 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
25445 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
25446 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
25447 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
25448 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
25449 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
25450 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
25451 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
25452 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
),
25453 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
),
25454 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
),
25455 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25456 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25457 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
),
25458 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
),
25459 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
25460 kept to preserve existing behaviour. */
25461 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25462 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25463 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
),
25464 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
),
25465 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
),
25466 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
25467 kept to preserve existing behaviour. */
25468 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25469 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25470 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
25471 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
25472 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
),
25473 /* The official spelling of the ARMv7 profile variants is the dashed form.
25474 Accept the non-dashed form for compatibility with old toolchains. */
25475 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25476 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
),
25477 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25478 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25479 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25480 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25481 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25482 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
),
25483 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
25484 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
),
25485 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
),
25486 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
),
25487 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
),
25488 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
25489 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
25490 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
),
25491 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25493 #undef ARM_ARCH_OPT
25495 /* ISA extensions in the co-processor and main instruction set space. */
25496 struct arm_option_extension_value_table
25500 const arm_feature_set merge_value
;
25501 const arm_feature_set clear_value
;
25502 /* List of architectures for which an extension is available. ARM_ARCH_NONE
25503 indicates that an extension is available for all architectures while
25504 ARM_ANY marks an empty entry. */
25505 const arm_feature_set allowed_archs
[2];
25508 /* The following table must be in alphabetical order with a NULL last entry.
25510 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
25511 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
25512 static const struct arm_option_extension_value_table arm_extensions
[] =
25514 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25515 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25516 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25517 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
25518 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25519 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
25520 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
25521 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
25522 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
25523 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25524 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25525 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25527 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25528 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25529 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
25530 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
25531 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
25532 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
25533 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
25534 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
25535 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
25536 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
25537 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25538 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25539 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
25540 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
25541 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25542 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25543 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
25544 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
25545 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
25546 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25547 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
25548 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
25549 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25550 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
25551 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
25552 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25553 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25554 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25555 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
25556 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25557 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
25558 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
25559 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25560 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
25562 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
25563 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25564 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
25565 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
25566 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
25570 /* ISA floating-point and Advanced SIMD extensions. */
25571 struct arm_option_fpu_value_table
25574 const arm_feature_set value
;
25577 /* This list should, at a minimum, contain all the fpu names
25578 recognized by GCC. */
25579 static const struct arm_option_fpu_value_table arm_fpus
[] =
25581 {"softfpa", FPU_NONE
},
25582 {"fpe", FPU_ARCH_FPE
},
25583 {"fpe2", FPU_ARCH_FPE
},
25584 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
25585 {"fpa", FPU_ARCH_FPA
},
25586 {"fpa10", FPU_ARCH_FPA
},
25587 {"fpa11", FPU_ARCH_FPA
},
25588 {"arm7500fe", FPU_ARCH_FPA
},
25589 {"softvfp", FPU_ARCH_VFP
},
25590 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
25591 {"vfp", FPU_ARCH_VFP_V2
},
25592 {"vfp9", FPU_ARCH_VFP_V2
},
25593 {"vfp3", FPU_ARCH_VFP_V3
}, /* For backwards compatbility. */
25594 {"vfp10", FPU_ARCH_VFP_V2
},
25595 {"vfp10-r0", FPU_ARCH_VFP_V1
},
25596 {"vfpxd", FPU_ARCH_VFP_V1xD
},
25597 {"vfpv2", FPU_ARCH_VFP_V2
},
25598 {"vfpv3", FPU_ARCH_VFP_V3
},
25599 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
25600 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
25601 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
25602 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
25603 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
25604 {"arm1020t", FPU_ARCH_VFP_V1
},
25605 {"arm1020e", FPU_ARCH_VFP_V2
},
25606 {"arm1136jfs", FPU_ARCH_VFP_V2
},
25607 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
25608 {"maverick", FPU_ARCH_MAVERICK
},
25609 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
25610 {"neon-fp16", FPU_ARCH_NEON_FP16
},
25611 {"vfpv4", FPU_ARCH_VFP_V4
},
25612 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
25613 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
25614 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
25615 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
25616 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
25617 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
25618 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
25619 {"crypto-neon-fp-armv8",
25620 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
25621 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
25622 {"crypto-neon-fp-armv8.1",
25623 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
25624 {NULL
, ARM_ARCH_NONE
}
/* Generic name -> integer-value mapping used by the float-ABI and EABI
   option tables below.  NOTE(review): field list reconstructed from the
   `opt->name` / `opt->value` accesses in the parsers below — confirm
   against the canonical source.  */
struct arm_option_value_table
{
  const char *name;   /* Option name to match.  */
  long value;         /* Value selected by that name.  */
};
25633 static const struct arm_option_value_table arm_float_abis
[] =
25635 {"hard", ARM_FLOAT_ABI_HARD
},
25636 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
25637 {"soft", ARM_FLOAT_ABI_SOFT
},
25642 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
25643 static const struct arm_option_value_table arm_eabis
[] =
25645 {"gnu", EF_ARM_EABI_UNKNOWN
},
25646 {"4", EF_ARM_EABI_VER4
},
25647 {"5", EF_ARM_EABI_VER5
},
/* Descriptor for one long-form command line option ("-mcpu=", ...).  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
25661 arm_parse_extension (const char *str
, const arm_feature_set
**opt_p
)
25663 arm_feature_set
*ext_set
= XNEW (arm_feature_set
);
25665 /* We insist on extensions being specified in alphabetical order, and with
25666 extensions being added before being removed. We achieve this by having
25667 the global ARM_EXTENSIONS table in alphabetical order, and using the
25668 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25669 or removing it (0) and only allowing it to change in the order
25671 const struct arm_option_extension_value_table
* opt
= NULL
;
25672 const arm_feature_set arm_any
= ARM_ANY
;
25673 int adding_value
= -1;
25675 /* Copy the feature set, so that we can modify it. */
25676 *ext_set
= **opt_p
;
25679 while (str
!= NULL
&& *str
!= 0)
25686 as_bad (_("invalid architectural extension"));
25691 ext
= strchr (str
, '+');
25696 len
= strlen (str
);
25698 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
25700 if (adding_value
!= 0)
25703 opt
= arm_extensions
;
25711 if (adding_value
== -1)
25714 opt
= arm_extensions
;
25716 else if (adding_value
!= 1)
25718 as_bad (_("must specify extensions to add before specifying "
25719 "those to remove"));
25726 as_bad (_("missing architectural extension"));
25730 gas_assert (adding_value
!= -1);
25731 gas_assert (opt
!= NULL
);
25733 /* Scan over the options table trying to find an exact match. */
25734 for (; opt
->name
!= NULL
; opt
++)
25735 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25737 int i
, nb_allowed_archs
=
25738 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
25739 /* Check we can apply the extension to this architecture. */
25740 for (i
= 0; i
< nb_allowed_archs
; i
++)
25743 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
25745 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *ext_set
))
25748 if (i
== nb_allowed_archs
)
25750 as_bad (_("extension does not apply to the base architecture"));
25754 /* Add or remove the extension. */
25756 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
25758 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
25763 if (opt
->name
== NULL
)
25765 /* Did we fail to find an extension because it wasn't specified in
25766 alphabetical order, or because it does not exist? */
25768 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
25769 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25772 if (opt
->name
== NULL
)
25773 as_bad (_("unknown architectural extension `%s'"), str
);
25775 as_bad (_("architectural extensions must be specified in "
25776 "alphabetical order"));
25782 /* We should skip the extension we've just matched the next time
25794 arm_parse_cpu (const char *str
)
25796 const struct arm_cpu_option_table
*opt
;
25797 const char *ext
= strchr (str
, '+');
25803 len
= strlen (str
);
25807 as_bad (_("missing cpu name `%s'"), str
);
25811 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
25812 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25814 mcpu_cpu_opt
= &opt
->value
;
25815 mcpu_fpu_opt
= &opt
->default_fpu
;
25816 if (opt
->canonical_name
)
25818 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
25819 strcpy (selected_cpu_name
, opt
->canonical_name
);
25825 if (len
>= sizeof selected_cpu_name
)
25826 len
= (sizeof selected_cpu_name
) - 1;
25828 for (i
= 0; i
< len
; i
++)
25829 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25830 selected_cpu_name
[i
] = 0;
25834 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
25839 as_bad (_("unknown cpu `%s'"), str
);
25844 arm_parse_arch (const char *str
)
25846 const struct arm_arch_option_table
*opt
;
25847 const char *ext
= strchr (str
, '+');
25853 len
= strlen (str
);
25857 as_bad (_("missing architecture name `%s'"), str
);
25861 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
25862 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25864 march_cpu_opt
= &opt
->value
;
25865 march_fpu_opt
= &opt
->default_fpu
;
25866 strcpy (selected_cpu_name
, opt
->name
);
25869 return arm_parse_extension (ext
, &march_cpu_opt
);
25874 as_bad (_("unknown architecture `%s'\n"), str
);
25879 arm_parse_fpu (const char * str
)
25881 const struct arm_option_fpu_value_table
* opt
;
25883 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
25884 if (streq (opt
->name
, str
))
25886 mfpu_opt
= &opt
->value
;
25890 as_bad (_("unknown floating point format `%s'\n"), str
);
25895 arm_parse_float_abi (const char * str
)
25897 const struct arm_option_value_table
* opt
;
25899 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
25900 if (streq (opt
->name
, str
))
25902 mfloat_abi_opt
= opt
->value
;
25906 as_bad (_("unknown floating point abi `%s'\n"), str
);
25912 arm_parse_eabi (const char * str
)
25914 const struct arm_option_value_table
*opt
;
25916 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
25917 if (streq (opt
->name
, str
))
25919 meabi_flags
= opt
->value
;
25922 as_bad (_("unknown EABI `%s'\n"), str
);
25928 arm_parse_it_mode (const char * str
)
25930 bfd_boolean ret
= TRUE
;
25932 if (streq ("arm", str
))
25933 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
25934 else if (streq ("thumb", str
))
25935 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
25936 else if (streq ("always", str
))
25937 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
25938 else if (streq ("never", str
))
25939 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
25942 as_bad (_("unknown implicit IT mode `%s', should be "\
25943 "arm, thumb, always, or never."), str
);
25951 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
25953 codecomposer_syntax
= TRUE
;
25954 arm_comment_chars
[0] = ';';
25955 arm_line_separator_chars
[0] = 0;
25959 struct arm_long_option_table arm_long_opts
[] =
25961 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
25962 arm_parse_cpu
, NULL
},
25963 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
25964 arm_parse_arch
, NULL
},
25965 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
25966 arm_parse_fpu
, NULL
},
25967 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
25968 arm_parse_float_abi
, NULL
},
25970 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
25971 arm_parse_eabi
, NULL
},
25973 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
25974 arm_parse_it_mode
, NULL
},
25975 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
25976 arm_ccs_mode
, NULL
},
25977 {NULL
, NULL
, 0, NULL
}
25981 md_parse_option (int c
, const char * arg
)
25983 struct arm_option_table
*opt
;
25984 const struct arm_legacy_option_table
*fopt
;
25985 struct arm_long_option_table
*lopt
;
25991 target_big_endian
= 1;
25997 target_big_endian
= 0;
26001 case OPTION_FIX_V4BX
:
26006 /* Listing option. Just ignore these, we don't support additional
26011 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
26013 if (c
== opt
->option
[0]
26014 && ((arg
== NULL
&& opt
->option
[1] == 0)
26015 || streq (arg
, opt
->option
+ 1)))
26017 /* If the option is deprecated, tell the user. */
26018 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
26019 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
26020 arg
? arg
: "", _(opt
->deprecated
));
26022 if (opt
->var
!= NULL
)
26023 *opt
->var
= opt
->value
;
26029 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
26031 if (c
== fopt
->option
[0]
26032 && ((arg
== NULL
&& fopt
->option
[1] == 0)
26033 || streq (arg
, fopt
->option
+ 1)))
26035 /* If the option is deprecated, tell the user. */
26036 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
26037 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
26038 arg
? arg
: "", _(fopt
->deprecated
));
26040 if (fopt
->var
!= NULL
)
26041 *fopt
->var
= &fopt
->value
;
26047 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
26049 /* These options are expected to have an argument. */
26050 if (c
== lopt
->option
[0]
26052 && strncmp (arg
, lopt
->option
+ 1,
26053 strlen (lopt
->option
+ 1)) == 0)
26055 /* If the option is deprecated, tell the user. */
26056 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
26057 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
26058 _(lopt
->deprecated
));
26060 /* Call the sup-option parser. */
26061 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
26072 md_show_usage (FILE * fp
)
26074 struct arm_option_table
*opt
;
26075 struct arm_long_option_table
*lopt
;
26077 fprintf (fp
, _(" ARM-specific assembler options:\n"));
26079 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
26080 if (opt
->help
!= NULL
)
26081 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
26083 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
26084 if (lopt
->help
!= NULL
)
26085 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
26089 -EB assemble code for a big-endian cpu\n"));
26094 -EL assemble code for a little-endian cpu\n"));
26098 --fix-v4bx Allow BX in ARMv4 code\n"));
26106 arm_feature_set flags
;
26107 } cpu_arch_ver_table
;
26109 /* Mapping from CPU features to EABI CPU arch values. As a general rule, table
26110 must be sorted least features first but some reordering is needed, eg. for
26111 Thumb-2 instructions to be detected as coming from ARMv6T2. */
26112 static const cpu_arch_ver_table cpu_arch_ver
[] =
26118 {4, ARM_ARCH_V5TE
},
26119 {5, ARM_ARCH_V5TEJ
},
26123 {11, ARM_ARCH_V6M
},
26124 {12, ARM_ARCH_V6SM
},
26125 {8, ARM_ARCH_V6T2
},
26126 {10, ARM_ARCH_V7VE
},
26127 {10, ARM_ARCH_V7R
},
26128 {10, ARM_ARCH_V7M
},
26129 {14, ARM_ARCH_V8A
},
26130 {16, ARM_ARCH_V8M_BASE
},
26131 {17, ARM_ARCH_V8M_MAIN
},
26135 /* Set an attribute if it has not already been set by the user. */
26137 aeabi_set_attribute_int (int tag
, int value
)
26140 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
26141 || !attributes_set_explicitly
[tag
])
26142 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
26146 aeabi_set_attribute_string (int tag
, const char *value
)
26149 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
26150 || !attributes_set_explicitly
[tag
])
26151 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
26154 /* Set the public EABI object attributes. */
26156 aeabi_set_public_attributes (void)
26161 int fp16_optional
= 0;
26162 arm_feature_set arm_arch
= ARM_ARCH_NONE
;
26163 arm_feature_set flags
;
26164 arm_feature_set tmp
;
26165 arm_feature_set arm_arch_v8m_base
= ARM_ARCH_V8M_BASE
;
26166 const cpu_arch_ver_table
*p
;
26168 /* Choose the architecture based on the capabilities of the requested cpu
26169 (if any) and/or the instructions actually used. */
26170 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
26171 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
26172 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
26174 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
26175 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
26177 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
26178 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
26180 selected_cpu
= flags
;
26182 /* Allow the user to override the reported architecture. */
26185 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
26186 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
26189 /* We need to make sure that the attributes do not identify us as v6S-M
26190 when the only v6S-M feature in use is the Operating System Extensions. */
26191 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_os
))
26192 if (!ARM_CPU_HAS_FEATURE (flags
, arm_arch_v6m_only
))
26193 ARM_CLEAR_FEATURE (flags
, flags
, arm_ext_os
);
26197 for (p
= cpu_arch_ver
; p
->val
; p
++)
26199 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
26202 arm_arch
= p
->flags
;
26203 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
26207 /* The table lookup above finds the last architecture to contribute
26208 a new feature. Unfortunately, Tag13 is a subset of the union of
26209 v6T2 and v7-M, so it is never seen as contributing a new feature.
26210 We can not search for the last entry which is entirely used,
26211 because if no CPU is specified we build up only those flags
26212 actually used. Perhaps we should separate out the specified
26213 and implicit cases. Avoid taking this path for -march=all by
26214 checking for contradictory v7-A / v7-M features. */
26215 if (arch
== TAG_CPU_ARCH_V7
26216 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
26217 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
26218 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
26220 arch
= TAG_CPU_ARCH_V7E_M
;
26221 arm_arch
= (arm_feature_set
) ARM_ARCH_V7EM
;
26224 ARM_CLEAR_FEATURE (tmp
, flags
, arm_arch_v8m_base
);
26225 if (arch
== TAG_CPU_ARCH_V8M_BASE
&& ARM_CPU_HAS_FEATURE (tmp
, arm_arch_any
))
26227 arch
= TAG_CPU_ARCH_V8M_MAIN
;
26228 arm_arch
= (arm_feature_set
) ARM_ARCH_V8M_MAIN
;
26231 /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
26232 coming from ARMv8-A. However, since ARMv8-A has more instructions than
26233 ARMv8-M, -march=all must be detected as ARMv8-A. */
26234 if (arch
== TAG_CPU_ARCH_V8M_MAIN
26235 && ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
26237 arch
= TAG_CPU_ARCH_V8
;
26238 arm_arch
= (arm_feature_set
) ARM_ARCH_V8A
;
26241 /* Tag_CPU_name. */
26242 if (selected_cpu_name
[0])
26246 q
= selected_cpu_name
;
26247 if (strncmp (q
, "armv", 4) == 0)
26252 for (i
= 0; q
[i
]; i
++)
26253 q
[i
] = TOUPPER (q
[i
]);
26255 aeabi_set_attribute_string (Tag_CPU_name
, q
);
26258 /* Tag_CPU_arch. */
26259 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
26261 /* Tag_CPU_arch_profile. */
26262 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
26263 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26264 || (ARM_CPU_HAS_FEATURE (flags
, arm_ext_atomics
)
26265 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
)))
26267 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
26269 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
26274 if (profile
!= '\0')
26275 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
26277 /* Tag_DSP_extension. */
26278 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_dsp
))
26280 arm_feature_set ext
;
26282 /* DSP instructions not in architecture. */
26283 ARM_CLEAR_FEATURE (ext
, flags
, arm_arch
);
26284 if (ARM_CPU_HAS_FEATURE (ext
, arm_ext_dsp
))
26285 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
26288 /* Tag_ARM_ISA_use. */
26289 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
26291 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
26293 /* Tag_THUMB_ISA_use. */
26294 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
26299 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26300 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
26302 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
26306 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
26309 /* Tag_VFP_arch. */
26310 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
26311 aeabi_set_attribute_int (Tag_VFP_arch
,
26312 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
26314 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
26315 aeabi_set_attribute_int (Tag_VFP_arch
,
26316 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
26318 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
26321 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
26323 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
26325 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
26328 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
26329 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
26330 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
26331 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
26332 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
26334 /* Tag_ABI_HardFP_use. */
26335 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
26336 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
26337 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
26339 /* Tag_WMMX_arch. */
26340 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
26341 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
26342 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
26343 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
26345 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
26346 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
26347 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
26348 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
26349 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
26350 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
26352 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
26354 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
26358 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
26363 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
26364 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
26365 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
26369 We set Tag_DIV_use to two when integer divide instructions have been used
26370 in ARM state, or when Thumb integer divide instructions have been used,
26371 but we have no architecture profile set, nor have we any ARM instructions.
26373 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
26374 by the base architecture.
26376 For new architectures we will have to check these tests. */
26377 gas_assert (arch
<= TAG_CPU_ARCH_V8
26378 || (arch
>= TAG_CPU_ARCH_V8M_BASE
26379 && arch
<= TAG_CPU_ARCH_V8M_MAIN
));
26380 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26381 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
26382 aeabi_set_attribute_int (Tag_DIV_use
, 0);
26383 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
26384 || (profile
== '\0'
26385 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
26386 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
26387 aeabi_set_attribute_int (Tag_DIV_use
, 2);
26389 /* Tag_MP_extension_use. */
26390 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
26391 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
26393 /* Tag Virtualization_use. */
26394 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
26396 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
26399 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
26402 /* Add the default contents for the .ARM.attributes section. */
26406 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
26409 aeabi_set_public_attributes ();
26411 #endif /* OBJ_ELF */
26414 /* Parse a .cpu directive. */
26417 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
26419 const struct arm_cpu_option_table
*opt
;
26423 name
= input_line_pointer
;
26424 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26425 input_line_pointer
++;
26426 saved_char
= *input_line_pointer
;
26427 *input_line_pointer
= 0;
26429 /* Skip the first "all" entry. */
26430 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
26431 if (streq (opt
->name
, name
))
26433 mcpu_cpu_opt
= &opt
->value
;
26434 selected_cpu
= opt
->value
;
26435 if (opt
->canonical_name
)
26436 strcpy (selected_cpu_name
, opt
->canonical_name
);
26440 for (i
= 0; opt
->name
[i
]; i
++)
26441 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
26443 selected_cpu_name
[i
] = 0;
26445 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26446 *input_line_pointer
= saved_char
;
26447 demand_empty_rest_of_line ();
26450 as_bad (_("unknown cpu `%s'"), name
);
26451 *input_line_pointer
= saved_char
;
26452 ignore_rest_of_line ();
26456 /* Parse a .arch directive. */
26459 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
26461 const struct arm_arch_option_table
*opt
;
26465 name
= input_line_pointer
;
26466 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26467 input_line_pointer
++;
26468 saved_char
= *input_line_pointer
;
26469 *input_line_pointer
= 0;
26471 /* Skip the first "all" entry. */
26472 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26473 if (streq (opt
->name
, name
))
26475 mcpu_cpu_opt
= &opt
->value
;
26476 selected_cpu
= opt
->value
;
26477 strcpy (selected_cpu_name
, opt
->name
);
26478 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26479 *input_line_pointer
= saved_char
;
26480 demand_empty_rest_of_line ();
26484 as_bad (_("unknown architecture `%s'\n"), name
);
26485 *input_line_pointer
= saved_char
;
26486 ignore_rest_of_line ();
26490 /* Parse a .object_arch directive. */
26493 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
26495 const struct arm_arch_option_table
*opt
;
26499 name
= input_line_pointer
;
26500 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26501 input_line_pointer
++;
26502 saved_char
= *input_line_pointer
;
26503 *input_line_pointer
= 0;
26505 /* Skip the first "all" entry. */
26506 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26507 if (streq (opt
->name
, name
))
26509 object_arch
= &opt
->value
;
26510 *input_line_pointer
= saved_char
;
26511 demand_empty_rest_of_line ();
26515 as_bad (_("unknown architecture `%s'\n"), name
);
26516 *input_line_pointer
= saved_char
;
26517 ignore_rest_of_line ();
26520 /* Parse a .arch_extension directive. */
26523 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
26525 const struct arm_option_extension_value_table
*opt
;
26526 const arm_feature_set arm_any
= ARM_ANY
;
26529 int adding_value
= 1;
26531 name
= input_line_pointer
;
26532 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26533 input_line_pointer
++;
26534 saved_char
= *input_line_pointer
;
26535 *input_line_pointer
= 0;
26537 if (strlen (name
) >= 2
26538 && strncmp (name
, "no", 2) == 0)
26544 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26545 if (streq (opt
->name
, name
))
26547 int i
, nb_allowed_archs
=
26548 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
26549 for (i
= 0; i
< nb_allowed_archs
; i
++)
26552 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
26554 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *mcpu_cpu_opt
))
26558 if (i
== nb_allowed_archs
)
26560 as_bad (_("architectural extension `%s' is not allowed for the "
26561 "current base architecture"), name
);
26566 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_cpu
,
26569 ARM_CLEAR_FEATURE (selected_cpu
, selected_cpu
, opt
->clear_value
);
26571 mcpu_cpu_opt
= &selected_cpu
;
26572 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26573 *input_line_pointer
= saved_char
;
26574 demand_empty_rest_of_line ();
26578 if (opt
->name
== NULL
)
26579 as_bad (_("unknown architecture extension `%s'\n"), name
);
26581 *input_line_pointer
= saved_char
;
26582 ignore_rest_of_line ();
26585 /* Parse a .fpu directive. */
26588 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
26590 const struct arm_option_fpu_value_table
*opt
;
26594 name
= input_line_pointer
;
26595 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26596 input_line_pointer
++;
26597 saved_char
= *input_line_pointer
;
26598 *input_line_pointer
= 0;
26600 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
26601 if (streq (opt
->name
, name
))
26603 mfpu_opt
= &opt
->value
;
26604 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26605 *input_line_pointer
= saved_char
;
26606 demand_empty_rest_of_line ();
26610 as_bad (_("unknown floating point format `%s'\n"), name
);
26611 *input_line_pointer
= saved_char
;
26612 ignore_rest_of_line ();
26615 /* Copy symbol information. */
26618 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
26620 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
26624 /* Given a symbolic attribute NAME, return the proper integer value.
26625 Returns -1 if the attribute is not known. */
26628 arm_convert_symbolic_attribute (const char *name
)
26630 static const struct
26635 attribute_table
[] =
26637 /* When you modify this table you should
26638 also modify the list in doc/c-arm.texi. */
26639 #define T(tag) {#tag, tag}
26640 T (Tag_CPU_raw_name
),
26643 T (Tag_CPU_arch_profile
),
26644 T (Tag_ARM_ISA_use
),
26645 T (Tag_THUMB_ISA_use
),
26649 T (Tag_Advanced_SIMD_arch
),
26650 T (Tag_PCS_config
),
26651 T (Tag_ABI_PCS_R9_use
),
26652 T (Tag_ABI_PCS_RW_data
),
26653 T (Tag_ABI_PCS_RO_data
),
26654 T (Tag_ABI_PCS_GOT_use
),
26655 T (Tag_ABI_PCS_wchar_t
),
26656 T (Tag_ABI_FP_rounding
),
26657 T (Tag_ABI_FP_denormal
),
26658 T (Tag_ABI_FP_exceptions
),
26659 T (Tag_ABI_FP_user_exceptions
),
26660 T (Tag_ABI_FP_number_model
),
26661 T (Tag_ABI_align_needed
),
26662 T (Tag_ABI_align8_needed
),
26663 T (Tag_ABI_align_preserved
),
26664 T (Tag_ABI_align8_preserved
),
26665 T (Tag_ABI_enum_size
),
26666 T (Tag_ABI_HardFP_use
),
26667 T (Tag_ABI_VFP_args
),
26668 T (Tag_ABI_WMMX_args
),
26669 T (Tag_ABI_optimization_goals
),
26670 T (Tag_ABI_FP_optimization_goals
),
26671 T (Tag_compatibility
),
26672 T (Tag_CPU_unaligned_access
),
26673 T (Tag_FP_HP_extension
),
26674 T (Tag_VFP_HP_extension
),
26675 T (Tag_ABI_FP_16bit_format
),
26676 T (Tag_MPextension_use
),
26678 T (Tag_nodefaults
),
26679 T (Tag_also_compatible_with
),
26680 T (Tag_conformance
),
26682 T (Tag_Virtualization_use
),
26683 T (Tag_DSP_extension
),
26684 /* We deliberately do not include Tag_MPextension_use_legacy. */
26692 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
26693 if (streq (name
, attribute_table
[i
].name
))
26694 return attribute_table
[i
].tag
;
26700 /* Apply sym value for relocations only in the case that they are for
26701 local symbols in the same segment as the fixup and you have the
26702 respective architectural feature for blx and simple switches. */
26704 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
26707 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
26708 /* PR 17444: If the local symbol is in a different section then a reloc
26709 will always be generated for it, so applying the symbol value now
26710 will result in a double offset being stored in the relocation. */
26711 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
26712 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
26714 switch (fixP
->fx_r_type
)
26716 case BFD_RELOC_ARM_PCREL_BLX
:
26717 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26718 if (ARM_IS_FUNC (fixP
->fx_addsy
))
26722 case BFD_RELOC_ARM_PCREL_CALL
:
26723 case BFD_RELOC_THUMB_PCREL_BLX
:
26724 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
26735 #endif /* OBJ_ELF */