1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS
,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result
;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
/* Nonzero when the two NUL-terminated strings A and B compare equal.  */
#define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant
;
127 static arm_feature_set arm_arch_used
;
128 static arm_feature_set thumb_arch_used
;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26
= FALSE
;
132 static int atpcs
= FALSE
;
133 static int support_interwork
= FALSE
;
134 static int uses_apcs_float
= FALSE
;
135 static int pic_code
= FALSE
;
136 static int fix_v4bx
= FALSE
;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated
= TRUE
;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
154 static const arm_feature_set
*object_arch
= NULL
;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
158 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
159 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
160 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
161 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
162 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
163 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
164 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
165 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
168 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
171 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
172 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
173 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
174 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
175 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
176 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
177 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
178 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
179 static const arm_feature_set arm_ext_v4t_5
=
180 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
181 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
182 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
183 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
184 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
185 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
186 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
187 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
188 static const arm_feature_set arm_ext_v6m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
);
189 static const arm_feature_set arm_ext_v6_notm
=
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
191 static const arm_feature_set arm_ext_v6_dsp
=
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
193 static const arm_feature_set arm_ext_barrier
=
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
195 static const arm_feature_set arm_ext_msr
=
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
197 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
198 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
199 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
200 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
201 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
202 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
203 static const arm_feature_set arm_ext_m
=
204 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_OS
| ARM_EXT_V7M
, ARM_EXT2_V8M
);
205 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
206 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
207 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
208 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
209 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
210 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
211 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
212 static const arm_feature_set arm_ext_v6t2_v8m
=
213 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
214 /* Instructions shared between ARMv8-A and ARMv8-M. */
215 static const arm_feature_set arm_ext_atomics
=
216 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
217 static const arm_feature_set arm_ext_v8_2
=
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A
);
219 /* FP16 instructions. */
220 static const arm_feature_set arm_ext_fp16
=
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
223 static const arm_feature_set arm_arch_any
= ARM_ANY
;
224 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1, -1);
225 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
226 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
227 static const arm_feature_set arm_arch_v6m_only
= ARM_ARCH_V6M_ONLY
;
229 static const arm_feature_set arm_cext_iwmmxt2
=
230 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
231 static const arm_feature_set arm_cext_iwmmxt
=
232 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
233 static const arm_feature_set arm_cext_xscale
=
234 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
235 static const arm_feature_set arm_cext_maverick
=
236 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
237 static const arm_feature_set fpu_fpa_ext_v1
=
238 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
239 static const arm_feature_set fpu_fpa_ext_v2
=
240 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
241 static const arm_feature_set fpu_vfp_ext_v1xd
=
242 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
243 static const arm_feature_set fpu_vfp_ext_v1
=
244 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
245 static const arm_feature_set fpu_vfp_ext_v2
=
246 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
247 static const arm_feature_set fpu_vfp_ext_v3xd
=
248 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
249 static const arm_feature_set fpu_vfp_ext_v3
=
250 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
251 static const arm_feature_set fpu_vfp_ext_d32
=
252 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
253 static const arm_feature_set fpu_neon_ext_v1
=
254 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
255 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
256 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
257 static const arm_feature_set fpu_vfp_fp16
=
258 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
259 static const arm_feature_set fpu_neon_ext_fma
=
260 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
261 static const arm_feature_set fpu_vfp_ext_fma
=
262 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
263 static const arm_feature_set fpu_vfp_ext_armv8
=
264 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
265 static const arm_feature_set fpu_vfp_ext_armv8xd
=
266 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
267 static const arm_feature_set fpu_neon_ext_armv8
=
268 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
269 static const arm_feature_set fpu_crypto_ext_armv8
=
270 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
271 static const arm_feature_set crc_ext_armv8
=
272 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
273 static const arm_feature_set fpu_neon_ext_v8_1
=
274 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
| FPU_NEON_EXT_RDMA
);
276 static int mfloat_abi_opt
= -1;
277 /* Record user cpu selection for object attributes. */
278 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
279 /* Must be long enough to hold any of the names in arm_cpus. */
280 static char selected_cpu_name
[20];
282 extern FLONUM_TYPE generic_floating_point_number
;
/* Return TRUE if no cpu was selected on the command-line.  */
286 no_cpu_selected (void)
288 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
293 static int meabi_flags
= EABI_DEFAULT
;
295 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
298 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
303 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
308 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
309 symbolS
* GOT_symbol
;
312 /* 0: assemble for ARM,
313 1: assemble for Thumb,
314 2: assemble for Thumb even though target CPU does not support thumb
316 static int thumb_mode
= 0;
317 /* A value distinct from the possible values for thumb_mode that we
318 can use to record whether thumb_mode has been copied into the
319 tc_frag_data field of a frag. */
320 #define MODE_RECORDED (1 << 4)
322 /* Specifies the intrinsic IT insn behavior mode. */
323 enum implicit_it_mode
325 IMPLICIT_IT_MODE_NEVER
= 0x00,
326 IMPLICIT_IT_MODE_ARM
= 0x01,
327 IMPLICIT_IT_MODE_THUMB
= 0x02,
328 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
330 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
332 /* If unified_syntax is true, we are processing the new unified
333 ARM/Thumb syntax. Important differences from the old ARM mode:
335 - Immediate operands do not require a # prefix.
336 - Conditional affixes always appear at the end of the
337 instruction. (For backward compatibility, those instructions
338 that formerly had them in the middle, continue to accept them
340 - The IT instruction may appear, and if it does is validated
341 against subsequent conditional affixes. It does not generate
344 Important differences from the old Thumb mode:
346 - Immediate operands do not require a # prefix.
347 - Most of the V6T2 instructions are only available in unified mode.
348 - The .N and .W suffixes are recognized and honored (it is an error
349 if they cannot be honored).
350 - All instructions set the flags if and only if they have an 's' affix.
351 - Conditional affixes may be used. They are validated against
352 preceding IT instructions. Unlike ARM mode, you cannot use a
353 conditional affix except in the scope of an IT instruction. */
355 static bfd_boolean unified_syntax
= FALSE
;
357 /* An immediate operand can start with #, and ld*, st*, pld operands
358 can contain [ and ]. We need to tell APP not to elide whitespace
359 before a [, which can appear as the first operand for pld.
360 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
361 const char arm_symbol_chars
[] = "#[]{}";
376 enum neon_el_type type
;
380 #define NEON_MAX_TYPE_ELS 4
384 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
388 enum it_instruction_type
393 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
394 if inside, should be the last one. */
395 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
396 i.e. BKPT and NOP. */
397 IT_INSN
/* The IT insn has been parsed. */
400 /* The maximum number of operands we need. */
401 #define ARM_IT_MAX_OPERANDS 6
406 unsigned long instruction
;
410 /* "uncond_value" is set to the value in place of the conditional field in
411 unconditional versions of the instruction, or -1 if nothing is
414 struct neon_type vectype
;
415 /* This does not indicate an actual NEON instruction, only that
416 the mnemonic accepts neon-style type suffixes. */
418 /* Set to the opcode if the instruction needs relaxation.
419 Zero if the instruction is not relaxed. */
423 bfd_reloc_code_real_type type
;
428 enum it_instruction_type it_insn_type
;
434 struct neon_type_el vectype
;
435 unsigned present
: 1; /* Operand present. */
436 unsigned isreg
: 1; /* Operand was a register. */
437 unsigned immisreg
: 1; /* .imm field is a second register. */
438 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
439 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
440 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
441 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
442 instructions. This allows us to disambiguate ARM <-> vector insns. */
443 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
444 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
445 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
446 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
447 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
448 unsigned writeback
: 1; /* Operand has trailing ! */
449 unsigned preind
: 1; /* Preindexed address. */
450 unsigned postind
: 1; /* Postindexed address. */
451 unsigned negative
: 1; /* Index register was negated. */
452 unsigned shifted
: 1; /* Shift applied to operation. */
453 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
454 } operands
[ARM_IT_MAX_OPERANDS
];
457 static struct arm_it inst
;
459 #define NUM_FLOAT_VALS 8
461 const char * fp_const
[] =
463 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
466 /* Number of littlenums required to hold an extended precision number. */
467 #define MAX_LITTLENUMS 6
469 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
479 #define CP_T_X 0x00008000
480 #define CP_T_Y 0x00400000
482 #define CONDS_BIT 0x00100000
483 #define LOAD_BIT 0x00100000
485 #define DOUBLE_LOAD_FLAG 0x00000001
489 const char * template_name
;
493 #define COND_ALWAYS 0xE
497 const char * template_name
;
501 struct asm_barrier_opt
503 const char * template_name
;
505 const arm_feature_set arch
;
508 /* The bit that distinguishes CPSR and SPSR. */
509 #define SPSR_BIT (1 << 22)
511 /* The individual PSR flag bits. */
512 #define PSR_c (1 << 16)
513 #define PSR_x (1 << 17)
514 #define PSR_s (1 << 18)
515 #define PSR_f (1 << 19)
520 bfd_reloc_code_real_type reloc
;
525 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
526 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
531 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
534 /* Bits for DEFINED field in neon_typed_alias. */
535 #define NTA_HASTYPE 1
536 #define NTA_HASINDEX 2
538 struct neon_typed_alias
540 unsigned char defined
;
542 struct neon_type_el eltype
;
545 /* ARM register categories. This includes coprocessor numbers and various
546 architecture extensions' registers. */
573 /* Structure for a hash table entry for a register.
574 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
575 information which states whether a vector type or index is specified (for a
576 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
582 unsigned char builtin
;
583 struct neon_typed_alias
* neon
;
586 /* Diagnostics used when we don't get a register of the expected type. */
587 const char * const reg_expected_msgs
[] =
589 N_("ARM register expected"),
590 N_("bad or missing co-processor number"),
591 N_("co-processor register expected"),
592 N_("FPA register expected"),
593 N_("VFP single precision register expected"),
594 N_("VFP/Neon double precision register expected"),
595 N_("Neon quad precision register expected"),
596 N_("VFP single or double precision register expected"),
597 N_("Neon double or quad precision register expected"),
598 N_("VFP single, double or Neon quad precision register expected"),
599 N_("VFP system register expected"),
600 N_("Maverick MVF register expected"),
601 N_("Maverick MVD register expected"),
602 N_("Maverick MVFX register expected"),
603 N_("Maverick MVDX register expected"),
604 N_("Maverick MVAX register expected"),
605 N_("Maverick DSPSC register expected"),
606 N_("iWMMXt data register expected"),
607 N_("iWMMXt control register expected"),
608 N_("iWMMXt scalar register expected"),
609 N_("XScale accumulator register expected"),
612 /* Some well known registers that we refer to directly elsewhere. */
618 /* ARM instructions take 4bytes in the object file, Thumb instructions
624 /* Basic string to match. */
625 const char * template_name
;
627 /* Parameters to instruction. */
628 unsigned int operands
[8];
630 /* Conditional tag - see opcode_lookup. */
631 unsigned int tag
: 4;
633 /* Basic instruction code. */
634 unsigned int avalue
: 28;
636 /* Thumb-format instruction code. */
639 /* Which architecture variant provides this instruction. */
640 const arm_feature_set
* avariant
;
641 const arm_feature_set
* tvariant
;
643 /* Function to call to encode instruction in ARM format. */
644 void (* aencode
) (void);
646 /* Function to call to encode instruction in Thumb format. */
647 void (* tencode
) (void);
650 /* Defines for various bits that we will want to toggle. */
651 #define INST_IMMEDIATE 0x02000000
652 #define OFFSET_REG 0x02000000
653 #define HWOFFSET_IMM 0x00400000
654 #define SHIFT_BY_REG 0x00000010
655 #define PRE_INDEX 0x01000000
656 #define INDEX_UP 0x00800000
657 #define WRITE_BACK 0x00200000
658 #define LDM_TYPE_2_OR_3 0x00400000
659 #define CPSI_MMOD 0x00020000
661 #define LITERAL_MASK 0xf000f000
662 #define OPCODE_MASK 0xfe1fffff
663 #define V4_STR_BIT 0x00000020
664 #define VLDR_VMOV_SAME 0x0040f000
666 #define T2_SUBS_PC_LR 0xf3de8f00
668 #define DATA_OP_SHIFT 21
670 #define T2_OPCODE_MASK 0xfe1fffff
671 #define T2_DATA_OP_SHIFT 21
673 #define A_COND_MASK 0xf0000000
674 #define A_PUSH_POP_OP_MASK 0x0fff0000
/* Opcodes for pushing/popping registers to/from the stack.  */
677 #define A1_OPCODE_PUSH 0x092d0000
678 #define A2_OPCODE_PUSH 0x052d0004
679 #define A2_OPCODE_POP 0x049d0004
681 /* Codes to distinguish the arithmetic instructions. */
692 #define OPCODE_CMP 10
693 #define OPCODE_CMN 11
694 #define OPCODE_ORR 12
695 #define OPCODE_MOV 13
696 #define OPCODE_BIC 14
697 #define OPCODE_MVN 15
699 #define T2_OPCODE_AND 0
700 #define T2_OPCODE_BIC 1
701 #define T2_OPCODE_ORR 2
702 #define T2_OPCODE_ORN 3
703 #define T2_OPCODE_EOR 4
704 #define T2_OPCODE_ADD 8
705 #define T2_OPCODE_ADC 10
706 #define T2_OPCODE_SBC 11
707 #define T2_OPCODE_SUB 13
708 #define T2_OPCODE_RSB 14
710 #define T_OPCODE_MUL 0x4340
711 #define T_OPCODE_TST 0x4200
712 #define T_OPCODE_CMN 0x42c0
713 #define T_OPCODE_NEG 0x4240
714 #define T_OPCODE_MVN 0x43c0
716 #define T_OPCODE_ADD_R3 0x1800
717 #define T_OPCODE_SUB_R3 0x1a00
718 #define T_OPCODE_ADD_HI 0x4400
719 #define T_OPCODE_ADD_ST 0xb000
720 #define T_OPCODE_SUB_ST 0xb080
721 #define T_OPCODE_ADD_SP 0xa800
722 #define T_OPCODE_ADD_PC 0xa000
723 #define T_OPCODE_ADD_I8 0x3000
724 #define T_OPCODE_SUB_I8 0x3800
725 #define T_OPCODE_ADD_I3 0x1c00
726 #define T_OPCODE_SUB_I3 0x1e00
728 #define T_OPCODE_ASR_R 0x4100
729 #define T_OPCODE_LSL_R 0x4080
730 #define T_OPCODE_LSR_R 0x40c0
731 #define T_OPCODE_ROR_R 0x41c0
732 #define T_OPCODE_ASR_I 0x1000
733 #define T_OPCODE_LSL_I 0x0000
734 #define T_OPCODE_LSR_I 0x0800
736 #define T_OPCODE_MOV_I8 0x2000
737 #define T_OPCODE_CMP_I8 0x2800
738 #define T_OPCODE_CMP_LR 0x4280
739 #define T_OPCODE_MOV_HR 0x4600
740 #define T_OPCODE_CMP_HR 0x4500
742 #define T_OPCODE_LDR_PC 0x4800
743 #define T_OPCODE_LDR_SP 0x9800
744 #define T_OPCODE_STR_SP 0x9000
745 #define T_OPCODE_LDR_IW 0x6800
746 #define T_OPCODE_STR_IW 0x6000
747 #define T_OPCODE_LDR_IH 0x8800
748 #define T_OPCODE_STR_IH 0x8000
749 #define T_OPCODE_LDR_IB 0x7800
750 #define T_OPCODE_STR_IB 0x7000
751 #define T_OPCODE_LDR_RW 0x5800
752 #define T_OPCODE_STR_RW 0x5000
753 #define T_OPCODE_LDR_RH 0x5a00
754 #define T_OPCODE_STR_RH 0x5200
755 #define T_OPCODE_LDR_RB 0x5c00
756 #define T_OPCODE_STR_RB 0x5400
758 #define T_OPCODE_PUSH 0xb400
759 #define T_OPCODE_POP 0xbc00
761 #define T_OPCODE_BRANCH 0xe000
763 #define THUMB_SIZE 2 /* Size of thumb instruction. */
764 #define THUMB_PP_PC_LR 0x0100
765 #define THUMB_LOAD_BIT 0x0800
766 #define THUMB2_LOAD_BIT 0x00100000
768 #define BAD_ARGS _("bad arguments to instruction")
769 #define BAD_SP _("r13 not allowed here")
770 #define BAD_PC _("r15 not allowed here")
771 #define BAD_COND _("instruction cannot be conditional")
772 #define BAD_OVERLAP _("registers may not be the same")
773 #define BAD_HIREG _("lo register required")
774 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* Fix: dropped the stray trailing ';' from the expansion.  A semicolon
   baked into the macro injects an extra statement terminator at every use
   site, and no other BAD_* diagnostic macro in this run carries one.  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
776 #define BAD_BRANCH _("branch must be last instruction in IT block")
777 #define BAD_NOT_IT _("instruction not allowed in IT block")
778 #define BAD_FPU _("selected FPU does not support instruction")
779 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
780 #define BAD_IT_COND _("incorrect condition in IT block")
781 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
782 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
783 #define BAD_PC_ADDRESSING \
784 _("cannot use register index with PC-relative addressing")
785 #define BAD_PC_WRITEBACK \
786 _("cannot use writeback with PC-relative addressing")
787 #define BAD_RANGE _("branch out of range")
788 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
790 static struct hash_control
* arm_ops_hsh
;
791 static struct hash_control
* arm_cond_hsh
;
792 static struct hash_control
* arm_shift_hsh
;
793 static struct hash_control
* arm_psr_hsh
;
794 static struct hash_control
* arm_v7m_psr_hsh
;
795 static struct hash_control
* arm_reg_hsh
;
796 static struct hash_control
* arm_reloc_hsh
;
797 static struct hash_control
* arm_barrier_opt_hsh
;
799 /* Stuff needed to resolve the label ambiguity
808 symbolS
* last_label_seen
;
809 static int label_is_thumb_function_name
= FALSE
;
811 /* Literal pool structure. Held on a per-section
812 and per-sub-section basis. */
814 #define MAX_LITERAL_POOL_SIZE 1024
815 typedef struct literal_pool
817 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
818 unsigned int next_free_entry
;
824 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
826 struct literal_pool
* next
;
827 unsigned int alignment
;
830 /* Pointer to a linked list of literal pools. */
831 literal_pool
* list_of_pools
= NULL
;
833 typedef enum asmfunc_states
836 WAITING_ASMFUNC_NAME
,
840 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
843 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
845 static struct current_it now_it
;
849 now_it_compatible (int cond
)
851 return (cond
& ~1) == (now_it
.cc
& ~1);
855 conditional_insn (void)
857 return inst
.cond
!= COND_ALWAYS
;
860 static int in_it_block (void);
862 static int handle_it_state (void);
864 static void force_automatic_it_block_close (void);
866 static void it_fsm_post_encode (void);
868 #define set_it_insn_type(type) \
871 inst.it_insn_type = type; \
872 if (handle_it_state () == FAIL) \
877 #define set_it_insn_type_nonvoid(type, failret) \
880 inst.it_insn_type = type; \
881 if (handle_it_state () == FAIL) \
886 #define set_it_insn_type_last() \
889 if (inst.cond == COND_ALWAYS) \
890 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
892 set_it_insn_type (INSIDE_IT_LAST_INSN); \
898 /* This array holds the chars that always start a comment. If the
899 pre-processor is disabled, these aren't very useful. */
900 char arm_comment_chars
[] = "@";
902 /* This array holds the chars that only start a comment at the beginning of
903 a line. If the line seems to have the form '# 123 filename'
904 .line and .file directives will appear in the pre-processed output. */
905 /* Note that input_file.c hand checks for '#' at the beginning of the
906 first line of the input file. This is because the compiler outputs
907 #NO_APP at the beginning of its output. */
908 /* Also note that comments like this one will always work. */
909 const char line_comment_chars
[] = "#";
911 char arm_line_separator_chars
[] = ";";
913 /* Chars that can be used to separate mant
914 from exp in floating point numbers. */
915 const char EXP_CHARS
[] = "eE";
917 /* Chars that mean this number is a floating point constant. */
921 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
923 /* Prefix characters that indicate the start of an immediate
925 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
927 /* Separator character handling. */
/* If *STR points at a space, advance STR past it.  Note that this skips
   at most ONE space (an `if', not a `while'); callers rely on that.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
932 skip_past_char (char ** str
, char c
)
934 /* PR gas/14987: Allow for whitespace before the expected character. */
935 skip_whitespace (*str
);
946 #define skip_past_comma(str) skip_past_char (str, ',')
948 /* Arithmetic expressions (possibly involving symbols). */
950 /* Return TRUE if anything in the expression is a bignum. */
953 walk_no_bignums (symbolS
* sp
)
955 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
958 if (symbol_get_value_expression (sp
)->X_add_symbol
)
960 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
961 || (symbol_get_value_expression (sp
)->X_op_symbol
962 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
968 static int in_my_get_expression
= 0;
970 /* Third argument to my_get_expression. */
971 #define GE_NO_PREFIX 0
972 #define GE_IMM_PREFIX 1
973 #define GE_OPT_PREFIX 2
974 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
975 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
976 #define GE_OPT_PREFIX_BIG 3
979 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
984 /* In unified syntax, all prefixes are optional. */
986 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
991 case GE_NO_PREFIX
: break;
993 if (!is_immediate_prefix (**str
))
995 inst
.error
= _("immediate expression requires a # prefix");
1001 case GE_OPT_PREFIX_BIG
:
1002 if (is_immediate_prefix (**str
))
1008 memset (ep
, 0, sizeof (expressionS
));
1010 save_in
= input_line_pointer
;
1011 input_line_pointer
= *str
;
1012 in_my_get_expression
= 1;
1013 seg
= expression (ep
);
1014 in_my_get_expression
= 0;
1016 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1018 /* We found a bad or missing expression in md_operand(). */
1019 *str
= input_line_pointer
;
1020 input_line_pointer
= save_in
;
1021 if (inst
.error
== NULL
)
1022 inst
.error
= (ep
->X_op
== O_absent
1023 ? _("missing expression") :_("bad expression"));
1028 if (seg
!= absolute_section
1029 && seg
!= text_section
1030 && seg
!= data_section
1031 && seg
!= bss_section
1032 && seg
!= undefined_section
)
1034 inst
.error
= _("bad segment");
1035 *str
= input_line_pointer
;
1036 input_line_pointer
= save_in
;
1043 /* Get rid of any bignums now, so that we don't generate an error for which
1044 we can't establish a line number later on. Big numbers are never valid
1045 in instructions, which is where this routine is always called. */
1046 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1047 && (ep
->X_op
== O_big
1048 || (ep
->X_add_symbol
1049 && (walk_no_bignums (ep
->X_add_symbol
)
1051 && walk_no_bignums (ep
->X_op_symbol
))))))
1053 inst
.error
= _("invalid constant");
1054 *str
= input_line_pointer
;
1055 input_line_pointer
= save_in
;
1059 *str
= input_line_pointer
;
1060 input_line_pointer
= save_in
;
1064 /* Turn a string in input_line_pointer into a floating point constant
1065 of type TYPE, and store the appropriate bytes in *LITP. The number
1066 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1067 returned, or NULL on OK.
   Note that fp constants aren't represented in the normal way on the ARM.
1070 In big endian mode, things are as expected. However, in little endian
1071 mode fp constants are big-endian word-wise, and little-endian byte-wise
1072 within the words. For example, (double) 1.1 in big endian mode is
1073 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1074 the byte sequence 99 99 f1 3f 9a 99 99 99.
1076 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1079 md_atof (int type
, char * litP
, int * sizeP
)
1082 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1114 return _("Unrecognized or unsupported floating point constant");
1117 t
= atof_ieee (input_line_pointer
, type
, words
);
1119 input_line_pointer
= t
;
1120 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1122 if (target_big_endian
)
1124 for (i
= 0; i
< prec
; i
++)
1126 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1127 litP
+= sizeof (LITTLENUM_TYPE
);
1132 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1133 for (i
= prec
- 1; i
>= 0; i
--)
1135 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1136 litP
+= sizeof (LITTLENUM_TYPE
);
1139 /* For a 4 byte float the order of elements in `words' is 1 0.
1140 For an 8 byte float the order is 1 0 3 2. */
1141 for (i
= 0; i
< prec
; i
+= 2)
1143 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1144 sizeof (LITTLENUM_TYPE
));
1145 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1146 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1147 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1154 /* We handle all bad expressions here, so that we can report the faulty
1155 instruction in the error message. */
1157 md_operand (expressionS
* exp
)
1159 if (in_my_get_expression
)
1160 exp
->X_op
= O_illegal
;
1163 /* Immediate values. */
1165 /* Generic immediate-value read function for use in directives.
1166 Accepts anything that 'expression' can fold to a constant.
1167 *val receives the number. */
1170 immediate_for_directive (int *val
)
1173 exp
.X_op
= O_illegal
;
1175 if (is_immediate_prefix (*input_line_pointer
))
1177 input_line_pointer
++;
1181 if (exp
.X_op
!= O_constant
)
1183 as_bad (_("expected #constant"));
1184 ignore_rest_of_line ();
1187 *val
= exp
.X_add_number
;
1192 /* Register parsing. */
1194 /* Generic register parser. CCP points to what should be the
1195 beginning of a register name. If it is indeed a valid register
1196 name, advance CCP over it and return the reg_entry structure;
1197 otherwise return NULL. Does not issue diagnostics. */
1199 static struct reg_entry
*
1200 arm_reg_parse_multi (char **ccp
)
1204 struct reg_entry
*reg
;
1206 skip_whitespace (start
);
1208 #ifdef REGISTER_PREFIX
1209 if (*start
!= REGISTER_PREFIX
)
1213 #ifdef OPTIONAL_REGISTER_PREFIX
1214 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1219 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1224 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1226 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1236 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1237 enum arm_reg_type type
)
1239 /* Alternative syntaxes are accepted for a few register classes. */
1246 /* Generic coprocessor register names are allowed for these. */
1247 if (reg
&& reg
->type
== REG_TYPE_CN
)
1252 /* For backward compatibility, a bare number is valid here. */
1254 unsigned long processor
= strtoul (start
, ccp
, 10);
1255 if (*ccp
!= start
&& processor
<= 15)
1259 case REG_TYPE_MMXWC
:
1260 /* WC includes WCG. ??? I'm not sure this is true for all
1261 instructions that take WC registers. */
1262 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1273 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1274 return value is the register number or FAIL. */
1277 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1280 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1283 /* Do not allow a scalar (reg+index) to parse as a register. */
1284 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1287 if (reg
&& reg
->type
== type
)
1290 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1297 /* Parse a Neon type specifier. *STR should point at the leading '.'
1298 character. Does no verification at this stage that the type fits the opcode
1305 Can all be legally parsed by this function.
1307 Fills in neon_type struct pointer with parsed information, and updates STR
1308 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1309 type, FAIL if not. */
1312 parse_neon_type (struct neon_type
*type
, char **str
)
1319 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1321 enum neon_el_type thistype
= NT_untyped
;
1322 unsigned thissize
= -1u;
1329 /* Just a size without an explicit type. */
1333 switch (TOLOWER (*ptr
))
1335 case 'i': thistype
= NT_integer
; break;
1336 case 'f': thistype
= NT_float
; break;
1337 case 'p': thistype
= NT_poly
; break;
1338 case 's': thistype
= NT_signed
; break;
1339 case 'u': thistype
= NT_unsigned
; break;
1341 thistype
= NT_float
;
1346 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1352 /* .f is an abbreviation for .f32. */
1353 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1358 thissize
= strtoul (ptr
, &ptr
, 10);
1360 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1363 as_bad (_("bad size %d in type specifier"), thissize
);
1371 type
->el
[type
->elems
].type
= thistype
;
1372 type
->el
[type
->elems
].size
= thissize
;
1377 /* Empty/missing type is not a successful parse. */
1378 if (type
->elems
== 0)
1386 /* Errors may be set multiple times during parsing or bit encoding
1387 (particularly in the Neon bits), but usually the earliest error which is set
1388 will be the most meaningful. Avoid overwriting it with later (cascading)
1389 errors by calling this function. */
1392 first_error (const char *err
)
1398 /* Parse a single type, e.g. ".s32", leading period included. */
1400 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1403 struct neon_type optype
;
1407 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1409 if (optype
.elems
== 1)
1410 *vectype
= optype
.el
[0];
1413 first_error (_("only one type should be specified for operand"));
1419 first_error (_("vector type expected"));
1431 /* Special meanings for indices (which have a range of 0-7), which will fit into
1434 #define NEON_ALL_LANES 15
1435 #define NEON_INTERLEAVE_LANES 14
1437 /* Parse either a register or a scalar, with an optional type. Return the
1438 register number, and optionally fill in the actual type of the register
1439 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1440 type/index information in *TYPEINFO. */
1443 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1444 enum arm_reg_type
*rtype
,
1445 struct neon_typed_alias
*typeinfo
)
1448 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1449 struct neon_typed_alias atype
;
1450 struct neon_type_el parsetype
;
1454 atype
.eltype
.type
= NT_invtype
;
1455 atype
.eltype
.size
= -1;
1457 /* Try alternate syntax for some types of register. Note these are mutually
1458 exclusive with the Neon syntax extensions. */
1461 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1469 /* Undo polymorphism when a set of register types may be accepted. */
1470 if ((type
== REG_TYPE_NDQ
1471 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1472 || (type
== REG_TYPE_VFSD
1473 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1474 || (type
== REG_TYPE_NSDQ
1475 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1476 || reg
->type
== REG_TYPE_NQ
))
1477 || (type
== REG_TYPE_MMXWC
1478 && (reg
->type
== REG_TYPE_MMXWCG
)))
1479 type
= (enum arm_reg_type
) reg
->type
;
1481 if (type
!= reg
->type
)
1487 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1489 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1491 first_error (_("can't redefine type for operand"));
1494 atype
.defined
|= NTA_HASTYPE
;
1495 atype
.eltype
= parsetype
;
1498 if (skip_past_char (&str
, '[') == SUCCESS
)
1500 if (type
!= REG_TYPE_VFD
)
1502 first_error (_("only D registers may be indexed"));
1506 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1508 first_error (_("can't change index for operand"));
1512 atype
.defined
|= NTA_HASINDEX
;
1514 if (skip_past_char (&str
, ']') == SUCCESS
)
1515 atype
.index
= NEON_ALL_LANES
;
1520 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1522 if (exp
.X_op
!= O_constant
)
1524 first_error (_("constant expression required"));
1528 if (skip_past_char (&str
, ']') == FAIL
)
1531 atype
.index
= exp
.X_add_number
;
1546 /* Like arm_reg_parse, but allow allow the following extra features:
1547 - If RTYPE is non-zero, return the (possibly restricted) type of the
1548 register (e.g. Neon double or quad reg when either has been requested).
1549 - If this is a Neon vector type with additional type information, fill
1550 in the struct pointed to by VECTYPE (if non-NULL).
1551 This function will fault on encountering a scalar. */
1554 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1555 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1557 struct neon_typed_alias atype
;
1559 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1564 /* Do not allow regname(... to parse as a register. */
1568 /* Do not allow a scalar (reg+index) to parse as a register. */
1569 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1571 first_error (_("register operand expected, but got scalar"));
1576 *vectype
= atype
.eltype
;
1583 #define NEON_SCALAR_REG(X) ((X) >> 4)
1584 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1586 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1587 have enough information to be able to do a good job bounds-checking. So, we
1588 just do easy checks here, and do further checks later. */
1591 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1595 struct neon_typed_alias atype
;
1597 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1599 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1602 if (atype
.index
== NEON_ALL_LANES
)
1604 first_error (_("scalar must have an index"));
1607 else if (atype
.index
>= 64 / elsize
)
1609 first_error (_("scalar index out of range"));
1614 *type
= atype
.eltype
;
1618 return reg
* 16 + atype
.index
;
1621 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1624 parse_reg_list (char ** strp
)
1626 char * str
= * strp
;
1630 /* We come back here if we get ranges concatenated by '+' or '|'. */
1633 skip_whitespace (str
);
1647 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1649 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1659 first_error (_("bad range in register list"));
1663 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1665 if (range
& (1 << i
))
1667 (_("Warning: duplicated register (r%d) in register list"),
1675 if (range
& (1 << reg
))
1676 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1678 else if (reg
<= cur_reg
)
1679 as_tsktsk (_("Warning: register range not in ascending order"));
1684 while (skip_past_comma (&str
) != FAIL
1685 || (in_range
= 1, *str
++ == '-'));
1688 if (skip_past_char (&str
, '}') == FAIL
)
1690 first_error (_("missing `}'"));
1698 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1701 if (exp
.X_op
== O_constant
)
1703 if (exp
.X_add_number
1704 != (exp
.X_add_number
& 0x0000ffff))
1706 inst
.error
= _("invalid register mask");
1710 if ((range
& exp
.X_add_number
) != 0)
1712 int regno
= range
& exp
.X_add_number
;
1715 regno
= (1 << regno
) - 1;
1717 (_("Warning: duplicated register (r%d) in register list"),
1721 range
|= exp
.X_add_number
;
1725 if (inst
.reloc
.type
!= 0)
1727 inst
.error
= _("expression too complex");
1731 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1732 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1733 inst
.reloc
.pc_rel
= 0;
1737 if (*str
== '|' || *str
== '+')
1743 while (another_range
);
1749 /* Types of registers in a list. */
1758 /* Parse a VFP register list. If the string is invalid return FAIL.
1759 Otherwise return the number of registers, and set PBASE to the first
1760 register. Parses registers of type ETYPE.
1761 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1762 - Q registers can be used to specify pairs of D registers
1763 - { } can be omitted from around a singleton register list
1764 FIXME: This is not implemented, as it would require backtracking in
1767 This could be done (the meaning isn't really ambiguous), but doesn't
1768 fit in well with the current parsing framework.
1769 - 32 D registers may be used (also true for VFPv3).
1770 FIXME: Types are ignored in these register lists, which is probably a
1774 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1779 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1783 unsigned long mask
= 0;
1786 if (skip_past_char (&str
, '{') == FAIL
)
1788 inst
.error
= _("expecting {");
1795 regtype
= REG_TYPE_VFS
;
1800 regtype
= REG_TYPE_VFD
;
1803 case REGLIST_NEON_D
:
1804 regtype
= REG_TYPE_NDQ
;
1808 if (etype
!= REGLIST_VFP_S
)
1810 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1811 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1815 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1818 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1825 base_reg
= max_regs
;
1829 int setmask
= 1, addregs
= 1;
1831 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1833 if (new_base
== FAIL
)
1835 first_error (_(reg_expected_msgs
[regtype
]));
1839 if (new_base
>= max_regs
)
1841 first_error (_("register out of range in list"));
1845 /* Note: a value of 2 * n is returned for the register Q<n>. */
1846 if (regtype
== REG_TYPE_NQ
)
1852 if (new_base
< base_reg
)
1853 base_reg
= new_base
;
1855 if (mask
& (setmask
<< new_base
))
1857 first_error (_("invalid register list"));
1861 if ((mask
>> new_base
) != 0 && ! warned
)
1863 as_tsktsk (_("register list not in ascending order"));
1867 mask
|= setmask
<< new_base
;
1870 if (*str
== '-') /* We have the start of a range expression */
1876 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1879 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1883 if (high_range
>= max_regs
)
1885 first_error (_("register out of range in list"));
1889 if (regtype
== REG_TYPE_NQ
)
1890 high_range
= high_range
+ 1;
1892 if (high_range
<= new_base
)
1894 inst
.error
= _("register range not in ascending order");
1898 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1900 if (mask
& (setmask
<< new_base
))
1902 inst
.error
= _("invalid register list");
1906 mask
|= setmask
<< new_base
;
1911 while (skip_past_comma (&str
) != FAIL
);
1915 /* Sanity check -- should have raised a parse error above. */
1916 if (count
== 0 || count
> max_regs
)
1921 /* Final test -- the registers must be consecutive. */
1923 for (i
= 0; i
< count
; i
++)
1925 if ((mask
& (1u << i
)) == 0)
1927 inst
.error
= _("non-contiguous register range");
1937 /* True if two alias types are the same. */
1940 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1948 if (a
->defined
!= b
->defined
)
1951 if ((a
->defined
& NTA_HASTYPE
) != 0
1952 && (a
->eltype
.type
!= b
->eltype
.type
1953 || a
->eltype
.size
!= b
->eltype
.size
))
1956 if ((a
->defined
& NTA_HASINDEX
) != 0
1957 && (a
->index
!= b
->index
))
1963 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1964 The base register is put in *PBASE.
1965 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1967 The register stride (minus one) is put in bit 4 of the return value.
1968 Bits [6:5] encode the list length (minus one).
1969 The type of the list elements is put in *ELTYPE, if non-NULL. */
1971 #define NEON_LANE(X) ((X) & 0xf)
1972 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1973 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1976 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1977 struct neon_type_el
*eltype
)
1984 int leading_brace
= 0;
1985 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1986 const char *const incr_error
= _("register stride must be 1 or 2");
1987 const char *const type_error
= _("mismatched element/structure types in list");
1988 struct neon_typed_alias firsttype
;
1990 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1995 struct neon_typed_alias atype
;
1996 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2000 first_error (_(reg_expected_msgs
[rtype
]));
2007 if (rtype
== REG_TYPE_NQ
)
2013 else if (reg_incr
== -1)
2015 reg_incr
= getreg
- base_reg
;
2016 if (reg_incr
< 1 || reg_incr
> 2)
2018 first_error (_(incr_error
));
2022 else if (getreg
!= base_reg
+ reg_incr
* count
)
2024 first_error (_(incr_error
));
2028 if (! neon_alias_types_same (&atype
, &firsttype
))
2030 first_error (_(type_error
));
2034 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2038 struct neon_typed_alias htype
;
2039 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2041 lane
= NEON_INTERLEAVE_LANES
;
2042 else if (lane
!= NEON_INTERLEAVE_LANES
)
2044 first_error (_(type_error
));
2049 else if (reg_incr
!= 1)
2051 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2055 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2058 first_error (_(reg_expected_msgs
[rtype
]));
2061 if (! neon_alias_types_same (&htype
, &firsttype
))
2063 first_error (_(type_error
));
2066 count
+= hireg
+ dregs
- getreg
;
2070 /* If we're using Q registers, we can't use [] or [n] syntax. */
2071 if (rtype
== REG_TYPE_NQ
)
2077 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2081 else if (lane
!= atype
.index
)
2083 first_error (_(type_error
));
2087 else if (lane
== -1)
2088 lane
= NEON_INTERLEAVE_LANES
;
2089 else if (lane
!= NEON_INTERLEAVE_LANES
)
2091 first_error (_(type_error
));
2096 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2098 /* No lane set by [x]. We must be interleaving structures. */
2100 lane
= NEON_INTERLEAVE_LANES
;
2103 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2104 || (count
> 1 && reg_incr
== -1))
2106 first_error (_("error parsing element/structure list"));
2110 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2112 first_error (_("expected }"));
2120 *eltype
= firsttype
.eltype
;
2125 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2128 /* Parse an explicit relocation suffix on an expression. This is
2129 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2130 arm_reloc_hsh contains no entries, so this function can only
2131 succeed if there is no () after the word. Returns -1 on error,
2132 BFD_RELOC_UNUSED if there wasn't any suffix. */
2135 parse_reloc (char **str
)
2137 struct reloc_entry
*r
;
2141 return BFD_RELOC_UNUSED
;
2146 while (*q
&& *q
!= ')' && *q
!= ',')
2151 if ((r
= (struct reloc_entry
*)
2152 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2159 /* Directives: register aliases. */
2161 static struct reg_entry
*
2162 insert_reg_alias (char *str
, unsigned number
, int type
)
2164 struct reg_entry
*new_reg
;
2167 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2169 if (new_reg
->builtin
)
2170 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2172 /* Only warn about a redefinition if it's not defined as the
2174 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2175 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2180 name
= xstrdup (str
);
2181 new_reg
= (struct reg_entry
*) xmalloc (sizeof (struct reg_entry
));
2183 new_reg
->name
= name
;
2184 new_reg
->number
= number
;
2185 new_reg
->type
= type
;
2186 new_reg
->builtin
= FALSE
;
2187 new_reg
->neon
= NULL
;
2189 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2196 insert_neon_reg_alias (char *str
, int number
, int type
,
2197 struct neon_typed_alias
*atype
)
2199 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2203 first_error (_("attempt to redefine typed alias"));
2209 reg
->neon
= (struct neon_typed_alias
*)
2210 xmalloc (sizeof (struct neon_typed_alias
));
2211 *reg
->neon
= *atype
;
2215 /* Look for the .req directive. This is of the form:
2217 new_register_name .req existing_register_name
2219 If we find one, or if it looks sufficiently like one that we want to
2220 handle any error here, return TRUE. Otherwise return FALSE. */
2223 create_register_alias (char * newname
, char *p
)
2225 struct reg_entry
*old
;
2226 char *oldname
, *nbuf
;
2229 /* The input scrubber ensures that whitespace after the mnemonic is
2230 collapsed to single spaces. */
2232 if (strncmp (oldname
, " .req ", 6) != 0)
2236 if (*oldname
== '\0')
2239 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2242 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2246 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2247 the desired alias name, and p points to its end. If not, then
2248 the desired alias name is in the global original_case_string. */
2249 #ifdef TC_CASE_SENSITIVE
2252 newname
= original_case_string
;
2253 nlen
= strlen (newname
);
2256 nbuf
= (char *) alloca (nlen
+ 1);
2257 memcpy (nbuf
, newname
, nlen
);
2260 /* Create aliases under the new name as stated; an all-lowercase
2261 version of the new name; and an all-uppercase version of the new
2263 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2265 for (p
= nbuf
; *p
; p
++)
2268 if (strncmp (nbuf
, newname
, nlen
))
2270 /* If this attempt to create an additional alias fails, do not bother
2271 trying to create the all-lower case alias. We will fail and issue
2272 a second, duplicate error message. This situation arises when the
2273 programmer does something like:
2276 The second .req creates the "Foo" alias but then fails to create
2277 the artificial FOO alias because it has already been created by the
2279 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2283 for (p
= nbuf
; *p
; p
++)
2286 if (strncmp (nbuf
, newname
, nlen
))
2287 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2293 /* Create a Neon typed/indexed register alias using directives, e.g.:
2298 These typed registers can be used instead of the types specified after the
2299 Neon mnemonic, so long as all operands given have types. Types can also be
2300 specified directly, e.g.:
2301 vadd d0.s32, d1.s32, d2.s32 */
2304 create_neon_reg_alias (char *newname
, char *p
)
2306 enum arm_reg_type basetype
;
2307 struct reg_entry
*basereg
;
2308 struct reg_entry mybasereg
;
2309 struct neon_type ntype
;
2310 struct neon_typed_alias typeinfo
;
2311 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2314 typeinfo
.defined
= 0;
2315 typeinfo
.eltype
.type
= NT_invtype
;
2316 typeinfo
.eltype
.size
= -1;
2317 typeinfo
.index
= -1;
2321 if (strncmp (p
, " .dn ", 5) == 0)
2322 basetype
= REG_TYPE_VFD
;
2323 else if (strncmp (p
, " .qn ", 5) == 0)
2324 basetype
= REG_TYPE_NQ
;
2333 basereg
= arm_reg_parse_multi (&p
);
2335 if (basereg
&& basereg
->type
!= basetype
)
2337 as_bad (_("bad type for register"));
2341 if (basereg
== NULL
)
2344 /* Try parsing as an integer. */
2345 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2346 if (exp
.X_op
!= O_constant
)
2348 as_bad (_("expression must be constant"));
2351 basereg
= &mybasereg
;
2352 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2358 typeinfo
= *basereg
->neon
;
2360 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2362 /* We got a type. */
2363 if (typeinfo
.defined
& NTA_HASTYPE
)
2365 as_bad (_("can't redefine the type of a register alias"));
2369 typeinfo
.defined
|= NTA_HASTYPE
;
2370 if (ntype
.elems
!= 1)
2372 as_bad (_("you must specify a single type only"));
2375 typeinfo
.eltype
= ntype
.el
[0];
2378 if (skip_past_char (&p
, '[') == SUCCESS
)
2381 /* We got a scalar index. */
2383 if (typeinfo
.defined
& NTA_HASINDEX
)
2385 as_bad (_("can't redefine the index of a scalar alias"));
2389 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2391 if (exp
.X_op
!= O_constant
)
2393 as_bad (_("scalar index must be constant"));
2397 typeinfo
.defined
|= NTA_HASINDEX
;
2398 typeinfo
.index
= exp
.X_add_number
;
2400 if (skip_past_char (&p
, ']') == FAIL
)
2402 as_bad (_("expecting ]"));
2407 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2408 the desired alias name, and p points to its end. If not, then
2409 the desired alias name is in the global original_case_string. */
2410 #ifdef TC_CASE_SENSITIVE
2411 namelen
= nameend
- newname
;
2413 newname
= original_case_string
;
2414 namelen
= strlen (newname
);
2417 namebuf
= (char *) alloca (namelen
+ 1);
2418 strncpy (namebuf
, newname
, namelen
);
2419 namebuf
[namelen
] = '\0';
2421 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2422 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2424 /* Insert name in all uppercase. */
2425 for (p
= namebuf
; *p
; p
++)
2428 if (strncmp (namebuf
, newname
, namelen
))
2429 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2430 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2432 /* Insert name in all lowercase. */
2433 for (p
= namebuf
; *p
; p
++)
2436 if (strncmp (namebuf
, newname
, namelen
))
2437 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2438 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2443 /* Should never be called, as .req goes between the alias and the
2444 register name, not at the beginning of the line. */
2447 s_req (int a ATTRIBUTE_UNUSED
)
2449 as_bad (_("invalid syntax for .req directive"));
2453 s_dn (int a ATTRIBUTE_UNUSED
)
2455 as_bad (_("invalid syntax for .dn directive"));
2459 s_qn (int a ATTRIBUTE_UNUSED
)
2461 as_bad (_("invalid syntax for .qn directive"));
2464 /* The .unreq directive deletes an alias which was previously defined
2465 by .req. For example:
2471 s_unreq (int a ATTRIBUTE_UNUSED
)
2476 name
= input_line_pointer
;
2478 while (*input_line_pointer
!= 0
2479 && *input_line_pointer
!= ' '
2480 && *input_line_pointer
!= '\n')
2481 ++input_line_pointer
;
2483 saved_char
= *input_line_pointer
;
2484 *input_line_pointer
= 0;
2487 as_bad (_("invalid syntax for .unreq directive"));
2490 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2494 as_bad (_("unknown register alias '%s'"), name
);
2495 else if (reg
->builtin
)
2496 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2503 hash_delete (arm_reg_hsh
, name
, FALSE
);
2504 free ((char *) reg
->name
);
2509 /* Also locate the all upper case and all lower case versions.
2510 Do not complain if we cannot find one or the other as it
2511 was probably deleted above. */
2513 nbuf
= strdup (name
);
2514 for (p
= nbuf
; *p
; p
++)
2516 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2519 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2520 free ((char *) reg
->name
);
2526 for (p
= nbuf
; *p
; p
++)
2528 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2531 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2532 free ((char *) reg
->name
);
2542 *input_line_pointer
= saved_char
;
2543 demand_empty_rest_of_line ();
2546 /* Directives: Instruction set selection. */
2549 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2550 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2551 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2552 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2554 /* Create a new mapping symbol for the transition to STATE. */
2557 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2560 const char * symname
;
2567 type
= BSF_NO_FLAGS
;
2571 type
= BSF_NO_FLAGS
;
2575 type
= BSF_NO_FLAGS
;
2581 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2582 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2587 THUMB_SET_FUNC (symbolP
, 0);
2588 ARM_SET_THUMB (symbolP
, 0);
2589 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2593 THUMB_SET_FUNC (symbolP
, 1);
2594 ARM_SET_THUMB (symbolP
, 1);
2595 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2603 /* Save the mapping symbols for future reference. Also check that
2604 we do not place two mapping symbols at the same offset within a
2605 frag. We'll handle overlap between frags in
2606 check_mapping_symbols.
2608 If .fill or other data filling directive generates zero sized data,
2609 the mapping symbol for the following code will have the same value
2610 as the one generated for the data filling directive. In this case,
2611 we replace the old symbol with the new one at the same address. */
2614 if (frag
->tc_frag_data
.first_map
!= NULL
)
2616 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2617 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2619 frag
->tc_frag_data
.first_map
= symbolP
;
2621 if (frag
->tc_frag_data
.last_map
!= NULL
)
2623 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2624 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2625 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2627 frag
->tc_frag_data
.last_map
= symbolP
;
2630 /* We must sometimes convert a region marked as code to data during
2631 code alignment, if an odd number of bytes have to be padded. The
2632 code mapping symbol is pushed to an aligned address. */
2635 insert_data_mapping_symbol (enum mstate state
,
2636 valueT value
, fragS
*frag
, offsetT bytes
)
2638 /* If there was already a mapping symbol, remove it. */
2639 if (frag
->tc_frag_data
.last_map
!= NULL
2640 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2642 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2646 know (frag
->tc_frag_data
.first_map
== symp
);
2647 frag
->tc_frag_data
.first_map
= NULL
;
2649 frag
->tc_frag_data
.last_map
= NULL
;
2650 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2653 make_mapping_symbol (MAP_DATA
, value
, frag
);
2654 make_mapping_symbol (state
, value
+ bytes
, frag
);
2657 static void mapping_state_2 (enum mstate state
, int max_chars
);
2659 /* Set the mapping state to STATE. Only call this when about to
2660 emit some STATE bytes to the file. */
2662 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2664 mapping_state (enum mstate state
)
2666 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2668 if (mapstate
== state
)
2669 /* The mapping symbol has already been emitted.
2670 There is nothing else to do. */
2673 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2675 All ARM instructions require 4-byte alignment.
2676 (Almost) all Thumb instructions require 2-byte alignment.
2678 When emitting instructions into any section, mark the section
2681 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2682 but themselves require 2-byte alignment; this applies to some
2683 PC- relative forms. However, these cases will invovle implicit
2684 literal pool generation or an explicit .align >=2, both of
2685 which will cause the section to me marked with sufficient
2686 alignment. Thus, we don't handle those cases here. */
2687 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2689 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2690 /* This case will be evaluated later. */
2693 mapping_state_2 (state
, 0);
2696 /* Same as mapping_state, but MAX_CHARS bytes have already been
2697 allocated. Put the mapping symbol that far back. */
2700 mapping_state_2 (enum mstate state
, int max_chars
)
2702 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2704 if (!SEG_NORMAL (now_seg
))
2707 if (mapstate
== state
)
2708 /* The mapping symbol has already been emitted.
2709 There is nothing else to do. */
2712 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2713 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2715 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2716 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2719 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2722 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2723 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2727 #define mapping_state(x) ((void)0)
2728 #define mapping_state_2(x, y) ((void)0)
2731 /* Find the real, Thumb encoded start of a Thumb function. */
2735 find_real_start (symbolS
* symbolP
)
2738 const char * name
= S_GET_NAME (symbolP
);
2739 symbolS
* new_target
;
2741 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2742 #define STUB_NAME ".real_start_of"
2747 /* The compiler may generate BL instructions to local labels because
2748 it needs to perform a branch to a far away location. These labels
2749 do not have a corresponding ".real_start_of" label. We check
2750 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2751 the ".real_start_of" convention for nonlocal branches. */
2752 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2755 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2756 new_target
= symbol_find (real_start
);
2758 if (new_target
== NULL
)
2760 as_warn (_("Failed to find real start of function: %s\n"), name
);
2761 new_target
= symbolP
;
2769 opcode_select (int width
)
2776 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2777 as_bad (_("selected processor does not support THUMB opcodes"));
2780 /* No need to force the alignment, since we will have been
2781 coming from ARM mode, which is word-aligned. */
2782 record_alignment (now_seg
, 1);
2789 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2790 as_bad (_("selected processor does not support ARM opcodes"));
2795 frag_align (2, 0, 0);
2797 record_alignment (now_seg
, 1);
2802 as_bad (_("invalid instruction size selected (%d)"), width
);
2807 s_arm (int ignore ATTRIBUTE_UNUSED
)
2810 demand_empty_rest_of_line ();
2814 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2817 demand_empty_rest_of_line ();
2821 s_code (int unused ATTRIBUTE_UNUSED
)
2825 temp
= get_absolute_expression ();
2830 opcode_select (temp
);
2834 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2839 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2841 /* If we are not already in thumb mode go into it, EVEN if
2842 the target processor does not support thumb instructions.
2843 This is used by gcc/config/arm/lib1funcs.asm for example
2844 to compile interworking support functions even if the
2845 target processor should not support interworking. */
2849 record_alignment (now_seg
, 1);
2852 demand_empty_rest_of_line ();
2856 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2860 /* The following label is the name/address of the start of a Thumb function.
2861 We need to know this for the interworking support. */
2862 label_is_thumb_function_name
= TRUE
;
2865 /* Perform a .set directive, but also mark the alias as
2866 being a thumb function. */
2869 s_thumb_set (int equiv
)
2871 /* XXX the following is a duplicate of the code for s_set() in read.c
2872 We cannot just call that code as we need to get at the symbol that
2879 /* Especial apologies for the random logic:
2880 This just grew, and could be parsed much more simply!
2882 delim
= get_symbol_name (& name
);
2883 end_name
= input_line_pointer
;
2884 (void) restore_line_pointer (delim
);
2886 if (*input_line_pointer
!= ',')
2889 as_bad (_("expected comma after name \"%s\""), name
);
2891 ignore_rest_of_line ();
2895 input_line_pointer
++;
2898 if (name
[0] == '.' && name
[1] == '\0')
2900 /* XXX - this should not happen to .thumb_set. */
2904 if ((symbolP
= symbol_find (name
)) == NULL
2905 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2908 /* When doing symbol listings, play games with dummy fragments living
2909 outside the normal fragment chain to record the file and line info
2911 if (listing
& LISTING_SYMBOLS
)
2913 extern struct list_info_struct
* listing_tail
;
2914 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2916 memset (dummy_frag
, 0, sizeof (fragS
));
2917 dummy_frag
->fr_type
= rs_fill
;
2918 dummy_frag
->line
= listing_tail
;
2919 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2920 dummy_frag
->fr_symbol
= symbolP
;
2924 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2927 /* "set" symbols are local unless otherwise specified. */
2928 SF_SET_LOCAL (symbolP
);
2929 #endif /* OBJ_COFF */
2930 } /* Make a new symbol. */
2932 symbol_table_insert (symbolP
);
2937 && S_IS_DEFINED (symbolP
)
2938 && S_GET_SEGMENT (symbolP
) != reg_section
)
2939 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2941 pseudo_set (symbolP
);
2943 demand_empty_rest_of_line ();
2945 /* XXX Now we come to the Thumb specific bit of code. */
2947 THUMB_SET_FUNC (symbolP
, 1);
2948 ARM_SET_THUMB (symbolP
, 1);
2949 #if defined OBJ_ELF || defined OBJ_COFF
2950 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2954 /* Directives: Mode selection. */
2956 /* .syntax [unified|divided] - choose the new unified syntax
2957 (same for Arm and Thumb encoding, modulo slight differences in what
2958 can be represented) or the old divergent syntax for each mode. */
2960 s_syntax (int unused ATTRIBUTE_UNUSED
)
2964 delim
= get_symbol_name (& name
);
2966 if (!strcasecmp (name
, "unified"))
2967 unified_syntax
= TRUE
;
2968 else if (!strcasecmp (name
, "divided"))
2969 unified_syntax
= FALSE
;
2972 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2975 (void) restore_line_pointer (delim
);
2976 demand_empty_rest_of_line ();
2979 /* Directives: sectioning and alignment. */
2982 s_bss (int ignore ATTRIBUTE_UNUSED
)
2984 /* We don't support putting frags in the BSS segment, we fake it by
2985 marking in_bss, then looking at s_skip for clues. */
2986 subseg_set (bss_section
, 0);
2987 demand_empty_rest_of_line ();
2989 #ifdef md_elf_section_change_hook
2990 md_elf_section_change_hook ();
2995 s_even (int ignore ATTRIBUTE_UNUSED
)
2997 /* Never make frag if expect extra pass. */
2999 frag_align (1, 0, 0);
3001 record_alignment (now_seg
, 1);
3003 demand_empty_rest_of_line ();
3006 /* Directives: CodeComposer Studio. */
3008 /* .ref (for CodeComposer Studio syntax only). */
3010 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3012 if (codecomposer_syntax
)
3013 ignore_rest_of_line ();
3015 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3018 /* If name is not NULL, then it is used for marking the beginning of a
3019 function, wherease if it is NULL then it means the function end. */
3021 asmfunc_debug (const char * name
)
3023 static const char * last_name
= NULL
;
3027 gas_assert (last_name
== NULL
);
3030 if (debug_type
== DEBUG_STABS
)
3031 stabs_generate_asm_func (name
, name
);
3035 gas_assert (last_name
!= NULL
);
3037 if (debug_type
== DEBUG_STABS
)
3038 stabs_generate_asm_endfunc (last_name
, last_name
);
3045 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3047 if (codecomposer_syntax
)
3049 switch (asmfunc_state
)
3051 case OUTSIDE_ASMFUNC
:
3052 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3055 case WAITING_ASMFUNC_NAME
:
3056 as_bad (_(".asmfunc repeated."));
3059 case WAITING_ENDASMFUNC
:
3060 as_bad (_(".asmfunc without function."));
3063 demand_empty_rest_of_line ();
3066 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3070 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3072 if (codecomposer_syntax
)
3074 switch (asmfunc_state
)
3076 case OUTSIDE_ASMFUNC
:
3077 as_bad (_(".endasmfunc without a .asmfunc."));
3080 case WAITING_ASMFUNC_NAME
:
3081 as_bad (_(".endasmfunc without function."));
3084 case WAITING_ENDASMFUNC
:
3085 asmfunc_state
= OUTSIDE_ASMFUNC
;
3086 asmfunc_debug (NULL
);
3089 demand_empty_rest_of_line ();
3092 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3096 s_ccs_def (int name
)
3098 if (codecomposer_syntax
)
3101 as_bad (_(".def pseudo-op only available with -mccs flag."));
3104 /* Directives: Literal pools. */
3106 static literal_pool
*
3107 find_literal_pool (void)
3109 literal_pool
* pool
;
3111 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3113 if (pool
->section
== now_seg
3114 && pool
->sub_section
== now_subseg
)
3121 static literal_pool
*
3122 find_or_make_literal_pool (void)
3124 /* Next literal pool ID number. */
3125 static unsigned int latest_pool_num
= 1;
3126 literal_pool
* pool
;
3128 pool
= find_literal_pool ();
3132 /* Create a new pool. */
3133 pool
= (literal_pool
*) xmalloc (sizeof (* pool
));
3137 pool
->next_free_entry
= 0;
3138 pool
->section
= now_seg
;
3139 pool
->sub_section
= now_subseg
;
3140 pool
->next
= list_of_pools
;
3141 pool
->symbol
= NULL
;
3142 pool
->alignment
= 2;
3144 /* Add it to the list. */
3145 list_of_pools
= pool
;
3148 /* New pools, and emptied pools, will have a NULL symbol. */
3149 if (pool
->symbol
== NULL
)
3151 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3152 (valueT
) 0, &zero_address_frag
);
3153 pool
->id
= latest_pool_num
++;
3160 /* Add the literal in the global 'inst'
3161 structure to the relevant literal pool. */
3164 add_to_lit_pool (unsigned int nbytes
)
3166 #define PADDING_SLOT 0x1
3167 #define LIT_ENTRY_SIZE_MASK 0xFF
3168 literal_pool
* pool
;
3169 unsigned int entry
, pool_size
= 0;
3170 bfd_boolean padding_slot_p
= FALSE
;
3176 imm1
= inst
.operands
[1].imm
;
3177 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3178 : inst
.reloc
.exp
.X_unsigned
? 0
3179 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3180 if (target_big_endian
)
3183 imm2
= inst
.operands
[1].imm
;
3187 pool
= find_or_make_literal_pool ();
3189 /* Check if this literal value is already in the pool. */
3190 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3194 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3195 && (inst
.reloc
.exp
.X_op
== O_constant
)
3196 && (pool
->literals
[entry
].X_add_number
3197 == inst
.reloc
.exp
.X_add_number
)
3198 && (pool
->literals
[entry
].X_md
== nbytes
)
3199 && (pool
->literals
[entry
].X_unsigned
3200 == inst
.reloc
.exp
.X_unsigned
))
3203 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3204 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3205 && (pool
->literals
[entry
].X_add_number
3206 == inst
.reloc
.exp
.X_add_number
)
3207 && (pool
->literals
[entry
].X_add_symbol
3208 == inst
.reloc
.exp
.X_add_symbol
)
3209 && (pool
->literals
[entry
].X_op_symbol
3210 == inst
.reloc
.exp
.X_op_symbol
)
3211 && (pool
->literals
[entry
].X_md
== nbytes
))
3214 else if ((nbytes
== 8)
3215 && !(pool_size
& 0x7)
3216 && ((entry
+ 1) != pool
->next_free_entry
)
3217 && (pool
->literals
[entry
].X_op
== O_constant
)
3218 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3219 && (pool
->literals
[entry
].X_unsigned
3220 == inst
.reloc
.exp
.X_unsigned
)
3221 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3222 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3223 && (pool
->literals
[entry
+ 1].X_unsigned
3224 == inst
.reloc
.exp
.X_unsigned
))
3227 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3228 if (padding_slot_p
&& (nbytes
== 4))
3234 /* Do we need to create a new entry? */
3235 if (entry
== pool
->next_free_entry
)
3237 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3239 inst
.error
= _("literal pool overflow");
3245 /* For 8-byte entries, we align to an 8-byte boundary,
3246 and split it into two 4-byte entries, because on 32-bit
3247 host, 8-byte constants are treated as big num, thus
3248 saved in "generic_bignum" which will be overwritten
3249 by later assignments.
3251 We also need to make sure there is enough space for
3254 We also check to make sure the literal operand is a
3256 if (!(inst
.reloc
.exp
.X_op
== O_constant
3257 || inst
.reloc
.exp
.X_op
== O_big
))
3259 inst
.error
= _("invalid type for literal pool");
3262 else if (pool_size
& 0x7)
3264 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3266 inst
.error
= _("literal pool overflow");
3270 pool
->literals
[entry
] = inst
.reloc
.exp
;
3271 pool
->literals
[entry
].X_add_number
= 0;
3272 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3273 pool
->next_free_entry
+= 1;
3276 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3278 inst
.error
= _("literal pool overflow");
3282 pool
->literals
[entry
] = inst
.reloc
.exp
;
3283 pool
->literals
[entry
].X_op
= O_constant
;
3284 pool
->literals
[entry
].X_add_number
= imm1
;
3285 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3286 pool
->literals
[entry
++].X_md
= 4;
3287 pool
->literals
[entry
] = inst
.reloc
.exp
;
3288 pool
->literals
[entry
].X_op
= O_constant
;
3289 pool
->literals
[entry
].X_add_number
= imm2
;
3290 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3291 pool
->literals
[entry
].X_md
= 4;
3292 pool
->alignment
= 3;
3293 pool
->next_free_entry
+= 1;
3297 pool
->literals
[entry
] = inst
.reloc
.exp
;
3298 pool
->literals
[entry
].X_md
= 4;
3302 /* PR ld/12974: Record the location of the first source line to reference
3303 this entry in the literal pool. If it turns out during linking that the
3304 symbol does not exist we will be able to give an accurate line number for
3305 the (first use of the) missing reference. */
3306 if (debug_type
== DEBUG_DWARF2
)
3307 dwarf2_where (pool
->locs
+ entry
);
3309 pool
->next_free_entry
+= 1;
3311 else if (padding_slot_p
)
3313 pool
->literals
[entry
] = inst
.reloc
.exp
;
3314 pool
->literals
[entry
].X_md
= nbytes
;
3317 inst
.reloc
.exp
.X_op
= O_symbol
;
3318 inst
.reloc
.exp
.X_add_number
= pool_size
;
3319 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3325 tc_start_label_without_colon (void)
3327 bfd_boolean ret
= TRUE
;
3329 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3331 const char *label
= input_line_pointer
;
3333 while (!is_end_of_line
[(int) label
[-1]])
3338 as_bad (_("Invalid label '%s'"), label
);
3342 asmfunc_debug (label
);
3344 asmfunc_state
= WAITING_ENDASMFUNC
;
3350 /* Can't use symbol_new here, so have to create a symbol and then at
3351 a later date assign it a value. Thats what these functions do. */
3354 symbol_locate (symbolS
* symbolP
,
3355 const char * name
, /* It is copied, the caller can modify. */
3356 segT segment
, /* Segment identifier (SEG_<something>). */
3357 valueT valu
, /* Symbol value. */
3358 fragS
* frag
) /* Associated fragment. */
3361 char * preserved_copy_of_name
;
3363 name_length
= strlen (name
) + 1; /* +1 for \0. */
3364 obstack_grow (¬es
, name
, name_length
);
3365 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3367 #ifdef tc_canonicalize_symbol_name
3368 preserved_copy_of_name
=
3369 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3372 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3374 S_SET_SEGMENT (symbolP
, segment
);
3375 S_SET_VALUE (symbolP
, valu
);
3376 symbol_clear_list_pointers (symbolP
);
3378 symbol_set_frag (symbolP
, frag
);
3380 /* Link to end of symbol chain. */
3382 extern int symbol_table_frozen
;
3384 if (symbol_table_frozen
)
3388 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3390 obj_symbol_new_hook (symbolP
);
3392 #ifdef tc_symbol_new_hook
3393 tc_symbol_new_hook (symbolP
);
3397 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3398 #endif /* DEBUG_SYMS */
3402 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3405 literal_pool
* pool
;
3408 pool
= find_literal_pool ();
3410 || pool
->symbol
== NULL
3411 || pool
->next_free_entry
== 0)
3414 /* Align pool as you have word accesses.
3415 Only make a frag if we have to. */
3417 frag_align (pool
->alignment
, 0, 0);
3419 record_alignment (now_seg
, 2);
3422 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3423 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3425 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3427 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3428 (valueT
) frag_now_fix (), frag_now
);
3429 symbol_table_insert (pool
->symbol
);
3431 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3433 #if defined OBJ_COFF || defined OBJ_ELF
3434 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3437 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3440 if (debug_type
== DEBUG_DWARF2
)
3441 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3443 /* First output the expression in the instruction to the pool. */
3444 emit_expr (&(pool
->literals
[entry
]),
3445 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3448 /* Mark the pool as empty. */
3449 pool
->next_free_entry
= 0;
3450 pool
->symbol
= NULL
;
3454 /* Forward declarations for functions below, in the MD interface
3456 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3457 static valueT
create_unwind_entry (int);
3458 static void start_unwind_section (const segT
, int);
3459 static void add_unwind_opcode (valueT
, int);
3460 static void flush_pending_unwind (void);
3462 /* Directives: Data. */
3465 s_arm_elf_cons (int nbytes
)
3469 #ifdef md_flush_pending_output
3470 md_flush_pending_output ();
3473 if (is_it_end_of_statement ())
3475 demand_empty_rest_of_line ();
3479 #ifdef md_cons_align
3480 md_cons_align (nbytes
);
3483 mapping_state (MAP_DATA
);
3487 char *base
= input_line_pointer
;
3491 if (exp
.X_op
!= O_symbol
)
3492 emit_expr (&exp
, (unsigned int) nbytes
);
3495 char *before_reloc
= input_line_pointer
;
3496 reloc
= parse_reloc (&input_line_pointer
);
3499 as_bad (_("unrecognized relocation suffix"));
3500 ignore_rest_of_line ();
3503 else if (reloc
== BFD_RELOC_UNUSED
)
3504 emit_expr (&exp
, (unsigned int) nbytes
);
3507 reloc_howto_type
*howto
= (reloc_howto_type
*)
3508 bfd_reloc_type_lookup (stdoutput
,
3509 (bfd_reloc_code_real_type
) reloc
);
3510 int size
= bfd_get_reloc_size (howto
);
3512 if (reloc
== BFD_RELOC_ARM_PLT32
)
3514 as_bad (_("(plt) is only valid on branch targets"));
3515 reloc
= BFD_RELOC_UNUSED
;
3520 as_bad (_("%s relocations do not fit in %d bytes"),
3521 howto
->name
, nbytes
);
3524 /* We've parsed an expression stopping at O_symbol.
3525 But there may be more expression left now that we
3526 have parsed the relocation marker. Parse it again.
3527 XXX Surely there is a cleaner way to do this. */
3528 char *p
= input_line_pointer
;
3530 char *save_buf
= (char *) alloca (input_line_pointer
- base
);
3531 memcpy (save_buf
, base
, input_line_pointer
- base
);
3532 memmove (base
+ (input_line_pointer
- before_reloc
),
3533 base
, before_reloc
- base
);
3535 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3537 memcpy (base
, save_buf
, p
- base
);
3539 offset
= nbytes
- size
;
3540 p
= frag_more (nbytes
);
3541 memset (p
, 0, nbytes
);
3542 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3543 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3548 while (*input_line_pointer
++ == ',');
3550 /* Put terminator back into stream. */
3551 input_line_pointer
--;
3552 demand_empty_rest_of_line ();
3555 /* Emit an expression containing a 32-bit thumb instruction.
3556 Implementation based on put_thumb32_insn. */
3559 emit_thumb32_expr (expressionS
* exp
)
3561 expressionS exp_high
= *exp
;
3563 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3564 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3565 exp
->X_add_number
&= 0xffff;
3566 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3569 /* Guess the instruction size based on the opcode. */
3572 thumb_insn_size (int opcode
)
3574 if ((unsigned int) opcode
< 0xe800u
)
3576 else if ((unsigned int) opcode
>= 0xe8000000u
)
3583 emit_insn (expressionS
*exp
, int nbytes
)
3587 if (exp
->X_op
== O_constant
)
3592 size
= thumb_insn_size (exp
->X_add_number
);
3596 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3598 as_bad (_(".inst.n operand too big. "\
3599 "Use .inst.w instead"));
3604 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3605 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3607 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3609 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3610 emit_thumb32_expr (exp
);
3612 emit_expr (exp
, (unsigned int) size
);
3614 it_fsm_post_encode ();
3618 as_bad (_("cannot determine Thumb instruction size. " \
3619 "Use .inst.n/.inst.w instead"));
3622 as_bad (_("constant expression required"));
3627 /* Like s_arm_elf_cons but do not use md_cons_align and
3628 set the mapping state to MAP_ARM/MAP_THUMB. */
3631 s_arm_elf_inst (int nbytes
)
3633 if (is_it_end_of_statement ())
3635 demand_empty_rest_of_line ();
3639 /* Calling mapping_state () here will not change ARM/THUMB,
3640 but will ensure not to be in DATA state. */
3643 mapping_state (MAP_THUMB
);
3648 as_bad (_("width suffixes are invalid in ARM mode"));
3649 ignore_rest_of_line ();
3655 mapping_state (MAP_ARM
);
3664 if (! emit_insn (& exp
, nbytes
))
3666 ignore_rest_of_line ();
3670 while (*input_line_pointer
++ == ',');
3672 /* Put terminator back into stream. */
3673 input_line_pointer
--;
3674 demand_empty_rest_of_line ();
3677 /* Parse a .rel31 directive. */
3680 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3687 if (*input_line_pointer
== '1')
3688 highbit
= 0x80000000;
3689 else if (*input_line_pointer
!= '0')
3690 as_bad (_("expected 0 or 1"));
3692 input_line_pointer
++;
3693 if (*input_line_pointer
!= ',')
3694 as_bad (_("missing comma"));
3695 input_line_pointer
++;
3697 #ifdef md_flush_pending_output
3698 md_flush_pending_output ();
3701 #ifdef md_cons_align
3705 mapping_state (MAP_DATA
);
3710 md_number_to_chars (p
, highbit
, 4);
3711 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3712 BFD_RELOC_ARM_PREL31
);
3714 demand_empty_rest_of_line ();
3717 /* Directives: AEABI stack-unwind tables. */
3719 /* Parse an unwind_fnstart directive. Simply records the current location. */
3722 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3724 demand_empty_rest_of_line ();
3725 if (unwind
.proc_start
)
3727 as_bad (_("duplicate .fnstart directive"));
3731 /* Mark the start of the function. */
3732 unwind
.proc_start
= expr_build_dot ();
3734 /* Reset the rest of the unwind info. */
3735 unwind
.opcode_count
= 0;
3736 unwind
.table_entry
= NULL
;
3737 unwind
.personality_routine
= NULL
;
3738 unwind
.personality_index
= -1;
3739 unwind
.frame_size
= 0;
3740 unwind
.fp_offset
= 0;
3741 unwind
.fp_reg
= REG_SP
;
3743 unwind
.sp_restored
= 0;
3747 /* Parse a handlerdata directive. Creates the exception handling table entry
3748 for the function. */
3751 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3753 demand_empty_rest_of_line ();
3754 if (!unwind
.proc_start
)
3755 as_bad (MISSING_FNSTART
);
3757 if (unwind
.table_entry
)
3758 as_bad (_("duplicate .handlerdata directive"));
3760 create_unwind_entry (1);
3763 /* Parse an unwind_fnend directive. Generates the index table entry. */
3766 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3771 unsigned int marked_pr_dependency
;
3773 demand_empty_rest_of_line ();
3775 if (!unwind
.proc_start
)
3777 as_bad (_(".fnend directive without .fnstart"));
3781 /* Add eh table entry. */
3782 if (unwind
.table_entry
== NULL
)
3783 val
= create_unwind_entry (0);
3787 /* Add index table entry. This is two words. */
3788 start_unwind_section (unwind
.saved_seg
, 1);
3789 frag_align (2, 0, 0);
3790 record_alignment (now_seg
, 2);
3792 ptr
= frag_more (8);
3794 where
= frag_now_fix () - 8;
3796 /* Self relative offset of the function start. */
3797 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3798 BFD_RELOC_ARM_PREL31
);
3800 /* Indicate dependency on EHABI-defined personality routines to the
3801 linker, if it hasn't been done already. */
3802 marked_pr_dependency
3803 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3804 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3805 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3807 static const char *const name
[] =
3809 "__aeabi_unwind_cpp_pr0",
3810 "__aeabi_unwind_cpp_pr1",
3811 "__aeabi_unwind_cpp_pr2"
3813 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3814 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3815 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3816 |= 1 << unwind
.personality_index
;
3820 /* Inline exception table entry. */
3821 md_number_to_chars (ptr
+ 4, val
, 4);
3823 /* Self relative offset of the table entry. */
3824 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3825 BFD_RELOC_ARM_PREL31
);
3827 /* Restore the original section. */
3828 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3830 unwind
.proc_start
= NULL
;
3834 /* Parse an unwind_cantunwind directive. */
3837 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3839 demand_empty_rest_of_line ();
3840 if (!unwind
.proc_start
)
3841 as_bad (MISSING_FNSTART
);
3843 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3844 as_bad (_("personality routine specified for cantunwind frame"));
3846 unwind
.personality_index
= -2;
3850 /* Parse a personalityindex directive. */
3853 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3857 if (!unwind
.proc_start
)
3858 as_bad (MISSING_FNSTART
);
3860 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3861 as_bad (_("duplicate .personalityindex directive"));
3865 if (exp
.X_op
!= O_constant
3866 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3868 as_bad (_("bad personality routine number"));
3869 ignore_rest_of_line ();
3873 unwind
.personality_index
= exp
.X_add_number
;
3875 demand_empty_rest_of_line ();
3879 /* Parse a personality directive. */
3882 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3886 if (!unwind
.proc_start
)
3887 as_bad (MISSING_FNSTART
);
3889 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3890 as_bad (_("duplicate .personality directive"));
3892 c
= get_symbol_name (& name
);
3893 p
= input_line_pointer
;
3895 ++ input_line_pointer
;
3896 unwind
.personality_routine
= symbol_find_or_make (name
);
3898 demand_empty_rest_of_line ();
3902 /* Parse a directive saving core registers. */
3905 s_arm_unwind_save_core (void)
3911 range
= parse_reg_list (&input_line_pointer
);
3914 as_bad (_("expected register list"));
3915 ignore_rest_of_line ();
3919 demand_empty_rest_of_line ();
3921 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3922 into .unwind_save {..., sp...}. We aren't bothered about the value of
3923 ip because it is clobbered by calls. */
3924 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3925 && (range
& 0x3000) == 0x1000)
3927 unwind
.opcode_count
--;
3928 unwind
.sp_restored
= 0;
3929 range
= (range
| 0x2000) & ~0x1000;
3930 unwind
.pending_offset
= 0;
3936 /* See if we can use the short opcodes. These pop a block of up to 8
3937 registers starting with r4, plus maybe r14. */
3938 for (n
= 0; n
< 8; n
++)
3940 /* Break at the first non-saved register. */
3941 if ((range
& (1 << (n
+ 4))) == 0)
3944 /* See if there are any other bits set. */
3945 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3947 /* Use the long form. */
3948 op
= 0x8000 | ((range
>> 4) & 0xfff);
3949 add_unwind_opcode (op
, 2);
3953 /* Use the short form. */
3955 op
= 0xa8; /* Pop r14. */
3957 op
= 0xa0; /* Do not pop r14. */
3959 add_unwind_opcode (op
, 1);
3966 op
= 0xb100 | (range
& 0xf);
3967 add_unwind_opcode (op
, 2);
3970 /* Record the number of bytes pushed. */
3971 for (n
= 0; n
< 16; n
++)
3973 if (range
& (1 << n
))
3974 unwind
.frame_size
+= 4;
3979 /* Parse a directive saving FPA registers. */
3982 s_arm_unwind_save_fpa (int reg
)
3988 /* Get Number of registers to transfer. */
3989 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3992 exp
.X_op
= O_illegal
;
3994 if (exp
.X_op
!= O_constant
)
3996 as_bad (_("expected , <constant>"));
3997 ignore_rest_of_line ();
4001 num_regs
= exp
.X_add_number
;
4003 if (num_regs
< 1 || num_regs
> 4)
4005 as_bad (_("number of registers must be in the range [1:4]"));
4006 ignore_rest_of_line ();
4010 demand_empty_rest_of_line ();
4015 op
= 0xb4 | (num_regs
- 1);
4016 add_unwind_opcode (op
, 1);
4021 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4022 add_unwind_opcode (op
, 2);
4024 unwind
.frame_size
+= num_regs
* 12;
4028 /* Parse a directive saving VFP registers for ARMv6 and above. */
4031 s_arm_unwind_save_vfp_armv6 (void)
4036 int num_vfpv3_regs
= 0;
4037 int num_regs_below_16
;
4039 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4042 as_bad (_("expected register list"));
4043 ignore_rest_of_line ();
4047 demand_empty_rest_of_line ();
4049 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4050 than FSTMX/FLDMX-style ones). */
4052 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4054 num_vfpv3_regs
= count
;
4055 else if (start
+ count
> 16)
4056 num_vfpv3_regs
= start
+ count
- 16;
4058 if (num_vfpv3_regs
> 0)
4060 int start_offset
= start
> 16 ? start
- 16 : 0;
4061 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4062 add_unwind_opcode (op
, 2);
4065 /* Generate opcode for registers numbered in the range 0 .. 15. */
4066 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4067 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4068 if (num_regs_below_16
> 0)
4070 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4071 add_unwind_opcode (op
, 2);
4074 unwind
.frame_size
+= count
* 8;
4078 /* Parse a directive saving VFP registers for pre-ARMv6. */
4081 s_arm_unwind_save_vfp (void)
4087 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4090 as_bad (_("expected register list"));
4091 ignore_rest_of_line ();
4095 demand_empty_rest_of_line ();
4100 op
= 0xb8 | (count
- 1);
4101 add_unwind_opcode (op
, 1);
4106 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4107 add_unwind_opcode (op
, 2);
4109 unwind
.frame_size
+= count
* 8 + 4;
4113 /* Parse a directive saving iWMMXt data registers. */
4116 s_arm_unwind_save_mmxwr (void)
4124 if (*input_line_pointer
== '{')
4125 input_line_pointer
++;
4129 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4133 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4138 as_tsktsk (_("register list not in ascending order"));
4141 if (*input_line_pointer
== '-')
4143 input_line_pointer
++;
4144 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4147 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4150 else if (reg
>= hi_reg
)
4152 as_bad (_("bad register range"));
4155 for (; reg
< hi_reg
; reg
++)
4159 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4161 skip_past_char (&input_line_pointer
, '}');
4163 demand_empty_rest_of_line ();
4165 /* Generate any deferred opcodes because we're going to be looking at
4167 flush_pending_unwind ();
4169 for (i
= 0; i
< 16; i
++)
4171 if (mask
& (1 << i
))
4172 unwind
.frame_size
+= 8;
4175 /* Attempt to combine with a previous opcode. We do this because gcc
4176 likes to output separate unwind directives for a single block of
4178 if (unwind
.opcode_count
> 0)
4180 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4181 if ((i
& 0xf8) == 0xc0)
4184 /* Only merge if the blocks are contiguous. */
4187 if ((mask
& 0xfe00) == (1 << 9))
4189 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4190 unwind
.opcode_count
--;
4193 else if (i
== 6 && unwind
.opcode_count
>= 2)
4195 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4199 op
= 0xffff << (reg
- 1);
4201 && ((mask
& op
) == (1u << (reg
- 1))))
4203 op
= (1 << (reg
+ i
+ 1)) - 1;
4204 op
&= ~((1 << reg
) - 1);
4206 unwind
.opcode_count
-= 2;
4213 /* We want to generate opcodes in the order the registers have been
4214 saved, ie. descending order. */
4215 for (reg
= 15; reg
>= -1; reg
--)
4217 /* Save registers in blocks. */
4219 || !(mask
& (1 << reg
)))
4221 /* We found an unsaved reg. Generate opcodes to save the
4228 op
= 0xc0 | (hi_reg
- 10);
4229 add_unwind_opcode (op
, 1);
4234 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4235 add_unwind_opcode (op
, 2);
4244 ignore_rest_of_line ();
4248 s_arm_unwind_save_mmxwcg (void)
4255 if (*input_line_pointer
== '{')
4256 input_line_pointer
++;
4258 skip_whitespace (input_line_pointer
);
4262 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4266 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4272 as_tsktsk (_("register list not in ascending order"));
4275 if (*input_line_pointer
== '-')
4277 input_line_pointer
++;
4278 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4281 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4284 else if (reg
>= hi_reg
)
4286 as_bad (_("bad register range"));
4289 for (; reg
< hi_reg
; reg
++)
4293 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4295 skip_past_char (&input_line_pointer
, '}');
4297 demand_empty_rest_of_line ();
4299 /* Generate any deferred opcodes because we're going to be looking at
4301 flush_pending_unwind ();
4303 for (reg
= 0; reg
< 16; reg
++)
4305 if (mask
& (1 << reg
))
4306 unwind
.frame_size
+= 4;
4309 add_unwind_opcode (op
, 2);
4312 ignore_rest_of_line ();
4316 /* Parse an unwind_save directive.
4317 If the argument is non-zero, this is a .vsave directive. */
4320 s_arm_unwind_save (int arch_v6
)
4323 struct reg_entry
*reg
;
4324 bfd_boolean had_brace
= FALSE
;
4326 if (!unwind
.proc_start
)
4327 as_bad (MISSING_FNSTART
);
4329 /* Figure out what sort of save we have. */
4330 peek
= input_line_pointer
;
4338 reg
= arm_reg_parse_multi (&peek
);
4342 as_bad (_("register expected"));
4343 ignore_rest_of_line ();
4352 as_bad (_("FPA .unwind_save does not take a register list"));
4353 ignore_rest_of_line ();
4356 input_line_pointer
= peek
;
4357 s_arm_unwind_save_fpa (reg
->number
);
4361 s_arm_unwind_save_core ();
4366 s_arm_unwind_save_vfp_armv6 ();
4368 s_arm_unwind_save_vfp ();
4371 case REG_TYPE_MMXWR
:
4372 s_arm_unwind_save_mmxwr ();
4375 case REG_TYPE_MMXWCG
:
4376 s_arm_unwind_save_mmxwcg ();
4380 as_bad (_(".unwind_save does not support this kind of register"));
4381 ignore_rest_of_line ();
4386 /* Parse an unwind_movsp directive. */
4389 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4395 if (!unwind
.proc_start
)
4396 as_bad (MISSING_FNSTART
);
4398 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4401 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4402 ignore_rest_of_line ();
4406 /* Optional constant. */
4407 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4409 if (immediate_for_directive (&offset
) == FAIL
)
4415 demand_empty_rest_of_line ();
4417 if (reg
== REG_SP
|| reg
== REG_PC
)
4419 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4423 if (unwind
.fp_reg
!= REG_SP
)
4424 as_bad (_("unexpected .unwind_movsp directive"));
4426 /* Generate opcode to restore the value. */
4428 add_unwind_opcode (op
, 1);
4430 /* Record the information for later. */
4431 unwind
.fp_reg
= reg
;
4432 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4433 unwind
.sp_restored
= 1;
4436 /* Parse an unwind_pad directive. */
4439 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4443 if (!unwind
.proc_start
)
4444 as_bad (MISSING_FNSTART
);
4446 if (immediate_for_directive (&offset
) == FAIL
)
4451 as_bad (_("stack increment must be multiple of 4"));
4452 ignore_rest_of_line ();
4456 /* Don't generate any opcodes, just record the details for later. */
4457 unwind
.frame_size
+= offset
;
4458 unwind
.pending_offset
+= offset
;
4460 demand_empty_rest_of_line ();
4463 /* Parse an unwind_setfp directive. */
4466 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4472 if (!unwind
.proc_start
)
4473 as_bad (MISSING_FNSTART
);
4475 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4476 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4479 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4481 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4483 as_bad (_("expected <reg>, <reg>"));
4484 ignore_rest_of_line ();
4488 /* Optional constant. */
4489 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4491 if (immediate_for_directive (&offset
) == FAIL
)
4497 demand_empty_rest_of_line ();
4499 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4501 as_bad (_("register must be either sp or set by a previous"
4502 "unwind_movsp directive"));
4506 /* Don't generate any opcodes, just record the information for later. */
4507 unwind
.fp_reg
= fp_reg
;
4509 if (sp_reg
== REG_SP
)
4510 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4512 unwind
.fp_offset
-= offset
;
4515 /* Parse an unwind_raw directive. */
4518 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4521 /* This is an arbitrary limit. */
4522 unsigned char op
[16];
4525 if (!unwind
.proc_start
)
4526 as_bad (MISSING_FNSTART
);
4529 if (exp
.X_op
== O_constant
4530 && skip_past_comma (&input_line_pointer
) != FAIL
)
4532 unwind
.frame_size
+= exp
.X_add_number
;
4536 exp
.X_op
= O_illegal
;
4538 if (exp
.X_op
!= O_constant
)
4540 as_bad (_("expected <offset>, <opcode>"));
4541 ignore_rest_of_line ();
4547 /* Parse the opcode. */
4552 as_bad (_("unwind opcode too long"));
4553 ignore_rest_of_line ();
4555 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4557 as_bad (_("invalid unwind opcode"));
4558 ignore_rest_of_line ();
4561 op
[count
++] = exp
.X_add_number
;
4563 /* Parse the next byte. */
4564 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4570 /* Add the opcode bytes in reverse order. */
4572 add_unwind_opcode (op
[count
], 1);
4574 demand_empty_rest_of_line ();
4578 /* Parse a .eabi_attribute directive. */
4581 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4583 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4585 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4586 attributes_set_explicitly
[tag
] = 1;
4589 /* Emit a tls fix for the symbol. */
4592 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4596 #ifdef md_flush_pending_output
4597 md_flush_pending_output ();
4600 #ifdef md_cons_align
4604 /* Since we're just labelling the code, there's no need to define a
4607 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4608 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4609 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4610 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4612 #endif /* OBJ_ELF */
4614 static void s_arm_arch (int);
4615 static void s_arm_object_arch (int);
4616 static void s_arm_cpu (int);
4617 static void s_arm_fpu (int);
4618 static void s_arm_arch_extension (int);
4623 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4630 if (exp
.X_op
== O_symbol
)
4631 exp
.X_op
= O_secrel
;
4633 emit_expr (&exp
, 4);
4635 while (*input_line_pointer
++ == ',');
4637 input_line_pointer
--;
4638 demand_empty_rest_of_line ();
4642 /* This table describes all the machine specific pseudo-ops the assembler
4643 has to support. The fields are:
4644 pseudo-op name without dot
4645 function to call to execute this pseudo-op
4646 Integer arg to pass to the function. */
4648 const pseudo_typeS md_pseudo_table
[] =
4650 /* Never called because '.req' does not start a line. */
4651 { "req", s_req
, 0 },
4652 /* Following two are likewise never called. */
4655 { "unreq", s_unreq
, 0 },
4656 { "bss", s_bss
, 0 },
4657 { "align", s_align_ptwo
, 2 },
4658 { "arm", s_arm
, 0 },
4659 { "thumb", s_thumb
, 0 },
4660 { "code", s_code
, 0 },
4661 { "force_thumb", s_force_thumb
, 0 },
4662 { "thumb_func", s_thumb_func
, 0 },
4663 { "thumb_set", s_thumb_set
, 0 },
4664 { "even", s_even
, 0 },
4665 { "ltorg", s_ltorg
, 0 },
4666 { "pool", s_ltorg
, 0 },
4667 { "syntax", s_syntax
, 0 },
4668 { "cpu", s_arm_cpu
, 0 },
4669 { "arch", s_arm_arch
, 0 },
4670 { "object_arch", s_arm_object_arch
, 0 },
4671 { "fpu", s_arm_fpu
, 0 },
4672 { "arch_extension", s_arm_arch_extension
, 0 },
4674 { "word", s_arm_elf_cons
, 4 },
4675 { "long", s_arm_elf_cons
, 4 },
4676 { "inst.n", s_arm_elf_inst
, 2 },
4677 { "inst.w", s_arm_elf_inst
, 4 },
4678 { "inst", s_arm_elf_inst
, 0 },
4679 { "rel31", s_arm_rel31
, 0 },
4680 { "fnstart", s_arm_unwind_fnstart
, 0 },
4681 { "fnend", s_arm_unwind_fnend
, 0 },
4682 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4683 { "personality", s_arm_unwind_personality
, 0 },
4684 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4685 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4686 { "save", s_arm_unwind_save
, 0 },
4687 { "vsave", s_arm_unwind_save
, 1 },
4688 { "movsp", s_arm_unwind_movsp
, 0 },
4689 { "pad", s_arm_unwind_pad
, 0 },
4690 { "setfp", s_arm_unwind_setfp
, 0 },
4691 { "unwind_raw", s_arm_unwind_raw
, 0 },
4692 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4693 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4697 /* These are used for dwarf. */
4701 /* These are used for dwarf2. */
4702 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4703 { "loc", dwarf2_directive_loc
, 0 },
4704 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4706 { "extend", float_cons
, 'x' },
4707 { "ldouble", float_cons
, 'x' },
4708 { "packed", float_cons
, 'p' },
4710 {"secrel32", pe_directive_secrel
, 0},
4713 /* These are for compatibility with CodeComposer Studio. */
4714 {"ref", s_ccs_ref
, 0},
4715 {"def", s_ccs_def
, 0},
4716 {"asmfunc", s_ccs_asmfunc
, 0},
4717 {"endasmfunc", s_ccs_endasmfunc
, 0},
4722 /* Parser functions used exclusively in instruction operands. */
4724 /* Generic immediate-value read function for use in insn parsing.
4725 STR points to the beginning of the immediate (the leading #);
4726 VAL receives the value; if the value is outside [MIN, MAX]
4727 issue an error. PREFIX_OPT is true if the immediate prefix is
4731 parse_immediate (char **str
, int *val
, int min
, int max
,
4732 bfd_boolean prefix_opt
)
4735 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4736 if (exp
.X_op
!= O_constant
)
4738 inst
.error
= _("constant expression required");
4742 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4744 inst
.error
= _("immediate value out of range");
4748 *val
= exp
.X_add_number
;
4752 /* Less-generic immediate-value read function with the possibility of loading a
4753 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4754 instructions. Puts the result directly in inst.operands[i]. */
4757 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4758 bfd_boolean allow_symbol_p
)
4761 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4764 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4766 if (exp_p
->X_op
== O_constant
)
4768 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4769 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4770 O_constant. We have to be careful not to break compilation for
4771 32-bit X_add_number, though. */
4772 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4774 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4775 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4777 inst
.operands
[i
].regisimm
= 1;
4780 else if (exp_p
->X_op
== O_big
4781 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4783 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4785 /* Bignums have their least significant bits in
4786 generic_bignum[0]. Make sure we put 32 bits in imm and
4787 32 bits in reg, in a (hopefully) portable way. */
4788 gas_assert (parts
!= 0);
4790 /* Make sure that the number is not too big.
4791 PR 11972: Bignums can now be sign-extended to the
4792 size of a .octa so check that the out of range bits
4793 are all zero or all one. */
4794 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4796 LITTLENUM_TYPE m
= -1;
4798 if (generic_bignum
[parts
* 2] != 0
4799 && generic_bignum
[parts
* 2] != m
)
4802 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4803 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4807 inst
.operands
[i
].imm
= 0;
4808 for (j
= 0; j
< parts
; j
++, idx
++)
4809 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4810 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4811 inst
.operands
[i
].reg
= 0;
4812 for (j
= 0; j
< parts
; j
++, idx
++)
4813 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4814 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4815 inst
.operands
[i
].regisimm
= 1;
4817 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4825 /* Returns the pseudo-register number of an FPA immediate constant,
4826 or FAIL if there isn't a valid constant here. */
4829 parse_fpa_immediate (char ** str
)
4831 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4837 /* First try and match exact strings, this is to guarantee
4838 that some formats will work even for cross assembly. */
4840 for (i
= 0; fp_const
[i
]; i
++)
4842 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4846 *str
+= strlen (fp_const
[i
]);
4847 if (is_end_of_line
[(unsigned char) **str
])
4853 /* Just because we didn't get a match doesn't mean that the constant
4854 isn't valid, just that it is in a format that we don't
4855 automatically recognize. Try parsing it with the standard
4856 expression routines. */
4858 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4860 /* Look for a raw floating point number. */
4861 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4862 && is_end_of_line
[(unsigned char) *save_in
])
4864 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4866 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4868 if (words
[j
] != fp_values
[i
][j
])
4872 if (j
== MAX_LITTLENUMS
)
4880 /* Try and parse a more complex expression, this will probably fail
4881 unless the code uses a floating point prefix (eg "0f"). */
4882 save_in
= input_line_pointer
;
4883 input_line_pointer
= *str
;
4884 if (expression (&exp
) == absolute_section
4885 && exp
.X_op
== O_big
4886 && exp
.X_add_number
< 0)
4888 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4890 #define X_PRECISION 5
4891 #define E_PRECISION 15L
4892 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
4894 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4896 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4898 if (words
[j
] != fp_values
[i
][j
])
4902 if (j
== MAX_LITTLENUMS
)
4904 *str
= input_line_pointer
;
4905 input_line_pointer
= save_in
;
4912 *str
= input_line_pointer
;
4913 input_line_pointer
= save_in
;
4914 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected;

  /* The low 19 bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 30..25 must be 0b011111 when bit 29 is set, and 0b100000
     otherwise (i.e. B is the complement of b in the pattern above).  */
  expected = (imm & 0x20000000) ? 0x3e000000u : 0x40000000u;
  return (imm & 0x7e000000) == expected;
}
4929 /* Detect the presence of a floating point or integer zero constant,
4933 parse_ifimm_zero (char **in
)
4937 if (!is_immediate_prefix (**in
))
4942 /* Accept #0x0 as a synonym for #0. */
4943 if (strncmp (*in
, "0x", 2) == 0)
4946 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
4951 error_code
= atof_generic (in
, ".", EXP_CHARS
,
4952 &generic_floating_point_number
);
4955 && generic_floating_point_number
.sign
== '+'
4956 && (generic_floating_point_number
.low
4957 > generic_floating_point_number
.leader
))
4963 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4964 0baBbbbbbc defgh000 00000000 00000000.
4965 The zero and minus-zero cases need special handling, since they can't be
4966 encoded in the "quarter-precision" float format, but can nonetheless be
4967 loaded as integer constants. */
4970 parse_qfloat_immediate (char **ccp
, int *immed
)
4974 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4975 int found_fpchar
= 0;
4977 skip_past_char (&str
, '#');
4979 /* We must not accidentally parse an integer as a floating-point number. Make
4980 sure that the value we parse is not an integer by checking for special
4981 characters '.' or 'e'.
4982 FIXME: This is a horrible hack, but doing better is tricky because type
4983 information isn't in a very usable state at parse time. */
4985 skip_whitespace (fpnum
);
4987 if (strncmp (fpnum
, "0x", 2) == 0)
4991 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4992 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5002 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5004 unsigned fpword
= 0;
5007 /* Our FP word must be 32 bits (single-precision FP). */
5008 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5010 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5014 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5027 /* Shift operands. */
5030 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5033 struct asm_shift_name
5036 enum shift_kind kind
;
5039 /* Third argument to parse_shift. */
5040 enum parse_shift_mode
5042 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5043 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5044 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5045 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5046 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5049 /* Parse a <shift> specifier on an ARM data processing instruction.
5050 This has three forms:
5052 (LSL|LSR|ASL|ASR|ROR) Rs
5053 (LSL|LSR|ASL|ASR|ROR) #imm
5056 Note that ASL is assimilated to LSL in the instruction encoding, and
5057 RRX to ROR #0 (which cannot be written as such). */
5060 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5062 const struct asm_shift_name
*shift_name
;
5063 enum shift_kind shift
;
5068 for (p
= *str
; ISALPHA (*p
); p
++)
5073 inst
.error
= _("shift expression expected");
5077 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5080 if (shift_name
== NULL
)
5082 inst
.error
= _("shift expression expected");
5086 shift
= shift_name
->kind
;
5090 case NO_SHIFT_RESTRICT
:
5091 case SHIFT_IMMEDIATE
: break;
5093 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5094 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5096 inst
.error
= _("'LSL' or 'ASR' required");
5101 case SHIFT_LSL_IMMEDIATE
:
5102 if (shift
!= SHIFT_LSL
)
5104 inst
.error
= _("'LSL' required");
5109 case SHIFT_ASR_IMMEDIATE
:
5110 if (shift
!= SHIFT_ASR
)
5112 inst
.error
= _("'ASR' required");
5120 if (shift
!= SHIFT_RRX
)
5122 /* Whitespace can appear here if the next thing is a bare digit. */
5123 skip_whitespace (p
);
5125 if (mode
== NO_SHIFT_RESTRICT
5126 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5128 inst
.operands
[i
].imm
= reg
;
5129 inst
.operands
[i
].immisreg
= 1;
5131 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5134 inst
.operands
[i
].shift_kind
= shift
;
5135 inst
.operands
[i
].shifted
= 1;
5140 /* Parse a <shifter_operand> for an ARM data processing instruction:
5143 #<immediate>, <rotate>
5147 where <shift> is defined by parse_shift above, and <rotate> is a
5148 multiple of 2 between 0 and 30. Validation of immediate operands
5149 is deferred to md_apply_fix. */
5152 parse_shifter_operand (char **str
, int i
)
5157 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5159 inst
.operands
[i
].reg
= value
;
5160 inst
.operands
[i
].isreg
= 1;
5162 /* parse_shift will override this if appropriate */
5163 inst
.reloc
.exp
.X_op
= O_constant
;
5164 inst
.reloc
.exp
.X_add_number
= 0;
5166 if (skip_past_comma (str
) == FAIL
)
5169 /* Shift operation on register. */
5170 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5173 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
5176 if (skip_past_comma (str
) == SUCCESS
)
5178 /* #x, y -- ie explicit rotation by Y. */
5179 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5182 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
5184 inst
.error
= _("constant expression expected");
5188 value
= exp
.X_add_number
;
5189 if (value
< 0 || value
> 30 || value
% 2 != 0)
5191 inst
.error
= _("invalid rotation");
5194 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
5196 inst
.error
= _("invalid constant");
5200 /* Encode as specified. */
5201 inst
.operands
[i
].imm
= inst
.reloc
.exp
.X_add_number
| value
<< 7;
5205 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5206 inst
.reloc
.pc_rel
= 0;
5210 /* Group relocation information. Each entry in the table contains the
5211 textual name of the relocation as may appear in assembler source
5212 and must end with a colon.
5213 Along with this textual name are the relocation codes to be used if
5214 the corresponding instruction is an ALU instruction (ADD or SUB only),
5215 an LDR, an LDRS, or an LDC. */
5217 struct group_reloc_table_entry
5228 /* Varieties of non-ALU group relocation. */
5235 static struct group_reloc_table_entry group_reloc_table
[] =
5236 { /* Program counter relative: */
5238 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5243 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5244 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5245 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5246 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5248 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5253 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5254 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5255 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5256 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5258 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5259 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5260 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5261 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5262 /* Section base relative */
5264 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5269 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5270 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5271 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5272 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5274 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5279 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5280 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5281 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5282 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5284 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5285 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5286 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5287 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5288 /* Absolute thumb alu relocations. */
5290 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5295 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5300 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5305 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5310 /* Given the address of a pointer pointing to the textual name of a group
5311 relocation as may appear in assembler source, attempt to find its details
5312 in group_reloc_table. The pointer will be updated to the character after
5313 the trailing colon. On failure, FAIL will be returned; SUCCESS
5314 otherwise. On success, *entry will be updated to point at the relevant
5315 group_reloc_table entry. */
5318 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5321 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5323 int length
= strlen (group_reloc_table
[i
].name
);
5325 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5326 && (*str
)[length
] == ':')
5328 *out
= &group_reloc_table
[i
];
5329 *str
+= (length
+ 1);
5337 /* Parse a <shifter_operand> for an ARM data processing instruction
5338 (as for parse_shifter_operand) where group relocations are allowed:
5341 #<immediate>, <rotate>
5342 #:<group_reloc>:<expression>
5346 where <group_reloc> is one of the strings defined in group_reloc_table.
5347 The hashes are optional.
5349 Everything else is as for parse_shifter_operand. */
5351 static parse_operand_result
5352 parse_shifter_operand_group_reloc (char **str
, int i
)
5354 /* Determine if we have the sequence of characters #: or just :
5355 coming next. If we do, then we check for a group relocation.
5356 If we don't, punt the whole lot to parse_shifter_operand. */
5358 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5359 || (*str
)[0] == ':')
5361 struct group_reloc_table_entry
*entry
;
5363 if ((*str
)[0] == '#')
5368 /* Try to parse a group relocation. Anything else is an error. */
5369 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5371 inst
.error
= _("unknown group relocation");
5372 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5375 /* We now have the group relocation table entry corresponding to
5376 the name in the assembler source. Next, we parse the expression. */
5377 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
5378 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5380 /* Record the relocation type (always the ALU variant here). */
5381 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5382 gas_assert (inst
.reloc
.type
!= 0);
5384 return PARSE_OPERAND_SUCCESS
;
5387 return parse_shifter_operand (str
, i
) == SUCCESS
5388 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5390 /* Never reached. */
5393 /* Parse a Neon alignment expression. Information is written to
5394 inst.operands[i]. We assume the initial ':' has been skipped.
5396 align .imm = align << 8, .immisalign=1, .preind=0 */
5397 static parse_operand_result
5398 parse_neon_alignment (char **str
, int i
)
5403 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5405 if (exp
.X_op
!= O_constant
)
5407 inst
.error
= _("alignment must be constant");
5408 return PARSE_OPERAND_FAIL
;
5411 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5412 inst
.operands
[i
].immisalign
= 1;
5413 /* Alignments are not pre-indexes. */
5414 inst
.operands
[i
].preind
= 0;
5417 return PARSE_OPERAND_SUCCESS
;
5420 /* Parse all forms of an ARM address expression. Information is written
5421 to inst.operands[i] and/or inst.reloc.
5423 Preindexed addressing (.preind=1):
5425 [Rn, #offset] .reg=Rn .reloc.exp=offset
5426 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5427 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5428 .shift_kind=shift .reloc.exp=shift_imm
5430 These three may have a trailing ! which causes .writeback to be set also.
5432 Postindexed addressing (.postind=1, .writeback=1):
5434 [Rn], #offset .reg=Rn .reloc.exp=offset
5435 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5436 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5437 .shift_kind=shift .reloc.exp=shift_imm
5439 Unindexed addressing (.preind=0, .postind=0):
5441 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5445 [Rn]{!} shorthand for [Rn,#0]{!}
5446 =immediate .isreg=0 .reloc.exp=immediate
5447 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5449 It is the caller's responsibility to check for addressing modes not
5450 supported by the instruction, and to set inst.reloc.type. */
5452 static parse_operand_result
5453 parse_address_main (char **str
, int i
, int group_relocations
,
5454 group_reloc_type group_type
)
5459 if (skip_past_char (&p
, '[') == FAIL
)
5461 if (skip_past_char (&p
, '=') == FAIL
)
5463 /* Bare address - translate to PC-relative offset. */
5464 inst
.reloc
.pc_rel
= 1;
5465 inst
.operands
[i
].reg
= REG_PC
;
5466 inst
.operands
[i
].isreg
= 1;
5467 inst
.operands
[i
].preind
= 1;
5469 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_OPT_PREFIX_BIG
))
5470 return PARSE_OPERAND_FAIL
;
5472 else if (parse_big_immediate (&p
, i
, &inst
.reloc
.exp
,
5473 /*allow_symbol_p=*/TRUE
))
5474 return PARSE_OPERAND_FAIL
;
5477 return PARSE_OPERAND_SUCCESS
;
5480 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5481 skip_whitespace (p
);
5483 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5485 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5486 return PARSE_OPERAND_FAIL
;
5488 inst
.operands
[i
].reg
= reg
;
5489 inst
.operands
[i
].isreg
= 1;
5491 if (skip_past_comma (&p
) == SUCCESS
)
5493 inst
.operands
[i
].preind
= 1;
5496 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5498 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5500 inst
.operands
[i
].imm
= reg
;
5501 inst
.operands
[i
].immisreg
= 1;
5503 if (skip_past_comma (&p
) == SUCCESS
)
5504 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5505 return PARSE_OPERAND_FAIL
;
5507 else if (skip_past_char (&p
, ':') == SUCCESS
)
5509 /* FIXME: '@' should be used here, but it's filtered out by generic
5510 code before we get to see it here. This may be subject to
5512 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5514 if (result
!= PARSE_OPERAND_SUCCESS
)
5519 if (inst
.operands
[i
].negative
)
5521 inst
.operands
[i
].negative
= 0;
5525 if (group_relocations
5526 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5528 struct group_reloc_table_entry
*entry
;
5530 /* Skip over the #: or : sequence. */
5536 /* Try to parse a group relocation. Anything else is an
5538 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5540 inst
.error
= _("unknown group relocation");
5541 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5544 /* We now have the group relocation table entry corresponding to
5545 the name in the assembler source. Next, we parse the
5547 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5548 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5550 /* Record the relocation type. */
5554 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5558 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5562 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5569 if (inst
.reloc
.type
== 0)
5571 inst
.error
= _("this group relocation is not allowed on this instruction");
5572 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5578 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5579 return PARSE_OPERAND_FAIL
;
5580 /* If the offset is 0, find out if it's a +0 or -0. */
5581 if (inst
.reloc
.exp
.X_op
== O_constant
5582 && inst
.reloc
.exp
.X_add_number
== 0)
5584 skip_whitespace (q
);
5588 skip_whitespace (q
);
5591 inst
.operands
[i
].negative
= 1;
5596 else if (skip_past_char (&p
, ':') == SUCCESS
)
5598 /* FIXME: '@' should be used here, but it's filtered out by generic code
5599 before we get to see it here. This may be subject to change. */
5600 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5602 if (result
!= PARSE_OPERAND_SUCCESS
)
5606 if (skip_past_char (&p
, ']') == FAIL
)
5608 inst
.error
= _("']' expected");
5609 return PARSE_OPERAND_FAIL
;
5612 if (skip_past_char (&p
, '!') == SUCCESS
)
5613 inst
.operands
[i
].writeback
= 1;
5615 else if (skip_past_comma (&p
) == SUCCESS
)
5617 if (skip_past_char (&p
, '{') == SUCCESS
)
5619 /* [Rn], {expr} - unindexed, with option */
5620 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5621 0, 255, TRUE
) == FAIL
)
5622 return PARSE_OPERAND_FAIL
;
5624 if (skip_past_char (&p
, '}') == FAIL
)
5626 inst
.error
= _("'}' expected at end of 'option' field");
5627 return PARSE_OPERAND_FAIL
;
5629 if (inst
.operands
[i
].preind
)
5631 inst
.error
= _("cannot combine index with option");
5632 return PARSE_OPERAND_FAIL
;
5635 return PARSE_OPERAND_SUCCESS
;
5639 inst
.operands
[i
].postind
= 1;
5640 inst
.operands
[i
].writeback
= 1;
5642 if (inst
.operands
[i
].preind
)
5644 inst
.error
= _("cannot combine pre- and post-indexing");
5645 return PARSE_OPERAND_FAIL
;
5649 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5651 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5653 /* We might be using the immediate for alignment already. If we
5654 are, OR the register number into the low-order bits. */
5655 if (inst
.operands
[i
].immisalign
)
5656 inst
.operands
[i
].imm
|= reg
;
5658 inst
.operands
[i
].imm
= reg
;
5659 inst
.operands
[i
].immisreg
= 1;
5661 if (skip_past_comma (&p
) == SUCCESS
)
5662 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5663 return PARSE_OPERAND_FAIL
;
5668 if (inst
.operands
[i
].negative
)
5670 inst
.operands
[i
].negative
= 0;
5673 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5674 return PARSE_OPERAND_FAIL
;
5675 /* If the offset is 0, find out if it's a +0 or -0. */
5676 if (inst
.reloc
.exp
.X_op
== O_constant
5677 && inst
.reloc
.exp
.X_add_number
== 0)
5679 skip_whitespace (q
);
5683 skip_whitespace (q
);
5686 inst
.operands
[i
].negative
= 1;
5692 /* If at this point neither .preind nor .postind is set, we have a
5693 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5694 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5696 inst
.operands
[i
].preind
= 1;
5697 inst
.reloc
.exp
.X_op
= O_constant
;
5698 inst
.reloc
.exp
.X_add_number
= 0;
5701 return PARSE_OPERAND_SUCCESS
;
5705 parse_address (char **str
, int i
)
5707 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5711 static parse_operand_result
5712 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5714 return parse_address_main (str
, i
, 1, type
);
5717 /* Parse an operand for a MOVW or MOVT instruction. */
5719 parse_half (char **str
)
5724 skip_past_char (&p
, '#');
5725 if (strncasecmp (p
, ":lower16:", 9) == 0)
5726 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5727 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5728 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5730 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5733 skip_whitespace (p
);
5736 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5739 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5741 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5743 inst
.error
= _("constant expression expected");
5746 if (inst
.reloc
.exp
.X_add_number
< 0
5747 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5749 inst
.error
= _("immediate value out of range");
5757 /* Miscellaneous. */
5759 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5760 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5762 parse_psr (char **str
, bfd_boolean lhs
)
5765 unsigned long psr_field
;
5766 const struct asm_psr
*psr
;
5768 bfd_boolean is_apsr
= FALSE
;
5769 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5771 /* PR gas/12698: If the user has specified -march=all then m_profile will
5772 be TRUE, but we want to ignore it in this case as we are building for any
5773 CPU type, including non-m variants. */
5774 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5777 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5778 feature for ease of use and backwards compatibility. */
5780 if (strncasecmp (p
, "SPSR", 4) == 0)
5783 goto unsupported_psr
;
5785 psr_field
= SPSR_BIT
;
5787 else if (strncasecmp (p
, "CPSR", 4) == 0)
5790 goto unsupported_psr
;
5794 else if (strncasecmp (p
, "APSR", 4) == 0)
5796 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5797 and ARMv7-R architecture CPUs. */
5806 while (ISALNUM (*p
) || *p
== '_');
5808 if (strncasecmp (start
, "iapsr", 5) == 0
5809 || strncasecmp (start
, "eapsr", 5) == 0
5810 || strncasecmp (start
, "xpsr", 4) == 0
5811 || strncasecmp (start
, "psr", 3) == 0)
5812 p
= start
+ strcspn (start
, "rR") + 1;
5814 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5820 /* If APSR is being written, a bitfield may be specified. Note that
5821 APSR itself is handled above. */
5822 if (psr
->field
<= 3)
5824 psr_field
= psr
->field
;
5830 /* M-profile MSR instructions have the mask field set to "10", except
5831 *PSR variants which modify APSR, which may use a different mask (and
5832 have been handled already). Do that by setting the PSR_f field
5834 return psr
->field
| (lhs
? PSR_f
: 0);
5837 goto unsupported_psr
;
5843 /* A suffix follows. */
5849 while (ISALNUM (*p
) || *p
== '_');
5853 /* APSR uses a notation for bits, rather than fields. */
5854 unsigned int nzcvq_bits
= 0;
5855 unsigned int g_bit
= 0;
5858 for (bit
= start
; bit
!= p
; bit
++)
5860 switch (TOLOWER (*bit
))
5863 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5867 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5871 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5875 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
5879 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
5883 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
5887 inst
.error
= _("unexpected bit specified after APSR");
5892 if (nzcvq_bits
== 0x1f)
5897 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
5899 inst
.error
= _("selected processor does not "
5900 "support DSP extension");
5907 if ((nzcvq_bits
& 0x20) != 0
5908 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
5909 || (g_bit
& 0x2) != 0)
5911 inst
.error
= _("bad bitmask specified after APSR");
5917 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5922 psr_field
|= psr
->field
;
5928 goto error
; /* Garbage after "[CS]PSR". */
5930 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5931 is deprecated, but allow it anyway. */
5935 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5938 else if (!m_profile
)
5939 /* These bits are never right for M-profile devices: don't set them
5940 (only code paths which read/write APSR reach here). */
5941 psr_field
|= (PSR_c
| PSR_f
);
5947 inst
.error
= _("selected processor does not support requested special "
5948 "purpose register");
5952 inst
.error
= _("flag for {c}psr instruction expected");
5956 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5957 value suitable for splatting into the AIF field of the instruction. */
5960 parse_cps_flags (char **str
)
5969 case '\0': case ',':
5972 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
5973 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
5974 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
5977 inst
.error
= _("unrecognized CPS flag");
5982 if (saw_a_flag
== 0)
5984 inst
.error
= _("missing CPS flags");
5992 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5993 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5996 parse_endian_specifier (char **str
)
6001 if (strncasecmp (s
, "BE", 2))
6003 else if (strncasecmp (s
, "LE", 2))
6007 inst
.error
= _("valid endian specifiers are be or le");
6011 if (ISALNUM (s
[2]) || s
[2] == '_')
6013 inst
.error
= _("valid endian specifiers are be or le");
6018 return little_endian
;
6021 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6022 value suitable for poking into the rotate field of an sxt or sxta
6023 instruction, or FAIL on error. */
6026 parse_ror (char **str
)
6031 if (strncasecmp (s
, "ROR", 3) == 0)
6035 inst
.error
= _("missing rotation field after comma");
6039 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6044 case 0: *str
= s
; return 0x0;
6045 case 8: *str
= s
; return 0x1;
6046 case 16: *str
= s
; return 0x2;
6047 case 24: *str
= s
; return 0x3;
6050 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6055 /* Parse a conditional code (from conds[] below). The value returned is in the
6056 range 0 .. 14, or FAIL. */
6058 parse_cond (char **str
)
6061 const struct asm_cond
*c
;
6063 /* Condition codes are always 2 characters, so matching up to
6064 3 characters is sufficient. */
6069 while (ISALPHA (*q
) && n
< 3)
6071 cond
[n
] = TOLOWER (*q
);
6076 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6079 inst
.error
= _("condition required");
6087 /* If the given feature available in the selected CPU, mark it as used.
6088 Returns TRUE iff feature is available. */
6090 mark_feature_used (const arm_feature_set
*feature
)
6092 /* Ensure the option is valid on the current architecture. */
6093 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6096 /* Add the appropriate architecture feature for the barrier option used.
6099 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6101 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6106 /* Parse an option for a barrier instruction. Returns the encoding for the
6109 parse_barrier (char **str
)
6112 const struct asm_barrier_opt
*o
;
6115 while (ISALPHA (*q
))
6118 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6123 if (!mark_feature_used (&o
->arch
))
6130 /* Parse the operands of a table branch instruction. Similar to a memory
6133 parse_tb (char **str
)
6138 if (skip_past_char (&p
, '[') == FAIL
)
6140 inst
.error
= _("'[' expected");
6144 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6146 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6149 inst
.operands
[0].reg
= reg
;
6151 if (skip_past_comma (&p
) == FAIL
)
6153 inst
.error
= _("',' expected");
6157 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6159 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6162 inst
.operands
[0].imm
= reg
;
6164 if (skip_past_comma (&p
) == SUCCESS
)
6166 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6168 if (inst
.reloc
.exp
.X_add_number
!= 1)
6170 inst
.error
= _("invalid shift");
6173 inst
.operands
[0].shifted
= 1;
6176 if (skip_past_char (&p
, ']') == FAIL
)
6178 inst
.error
= _("']' expected");
6185 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6186 information on the types the operands can take and how they are encoded.
6187 Up to four operands may be read; this function handles setting the
6188 ".present" field for each read operand itself.
6189 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6190 else returns FAIL. */
6193 parse_neon_mov (char **str
, int *which_operand
)
6195 int i
= *which_operand
, val
;
6196 enum arm_reg_type rtype
;
6198 struct neon_type_el optype
;
6200 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6202 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6203 inst
.operands
[i
].reg
= val
;
6204 inst
.operands
[i
].isscalar
= 1;
6205 inst
.operands
[i
].vectype
= optype
;
6206 inst
.operands
[i
++].present
= 1;
6208 if (skip_past_comma (&ptr
) == FAIL
)
6211 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6214 inst
.operands
[i
].reg
= val
;
6215 inst
.operands
[i
].isreg
= 1;
6216 inst
.operands
[i
].present
= 1;
6218 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6221 /* Cases 0, 1, 2, 3, 5 (D only). */
6222 if (skip_past_comma (&ptr
) == FAIL
)
6225 inst
.operands
[i
].reg
= val
;
6226 inst
.operands
[i
].isreg
= 1;
6227 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6228 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6229 inst
.operands
[i
].isvec
= 1;
6230 inst
.operands
[i
].vectype
= optype
;
6231 inst
.operands
[i
++].present
= 1;
6233 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6235 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6236 Case 13: VMOV <Sd>, <Rm> */
6237 inst
.operands
[i
].reg
= val
;
6238 inst
.operands
[i
].isreg
= 1;
6239 inst
.operands
[i
].present
= 1;
6241 if (rtype
== REG_TYPE_NQ
)
6243 first_error (_("can't use Neon quad register here"));
6246 else if (rtype
!= REG_TYPE_VFS
)
6249 if (skip_past_comma (&ptr
) == FAIL
)
6251 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6253 inst
.operands
[i
].reg
= val
;
6254 inst
.operands
[i
].isreg
= 1;
6255 inst
.operands
[i
].present
= 1;
6258 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6261 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6262 Case 1: VMOV<c><q> <Dd>, <Dm>
6263 Case 8: VMOV.F32 <Sd>, <Sm>
6264 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6266 inst
.operands
[i
].reg
= val
;
6267 inst
.operands
[i
].isreg
= 1;
6268 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6269 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6270 inst
.operands
[i
].isvec
= 1;
6271 inst
.operands
[i
].vectype
= optype
;
6272 inst
.operands
[i
].present
= 1;
6274 if (skip_past_comma (&ptr
) == SUCCESS
)
6279 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6282 inst
.operands
[i
].reg
= val
;
6283 inst
.operands
[i
].isreg
= 1;
6284 inst
.operands
[i
++].present
= 1;
6286 if (skip_past_comma (&ptr
) == FAIL
)
6289 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6292 inst
.operands
[i
].reg
= val
;
6293 inst
.operands
[i
].isreg
= 1;
6294 inst
.operands
[i
].present
= 1;
6297 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6298 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6299 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6300 Case 10: VMOV.F32 <Sd>, #<imm>
6301 Case 11: VMOV.F64 <Dd>, #<imm> */
6302 inst
.operands
[i
].immisfloat
= 1;
6303 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6305 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6306 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6310 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6314 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6317 inst
.operands
[i
].reg
= val
;
6318 inst
.operands
[i
].isreg
= 1;
6319 inst
.operands
[i
++].present
= 1;
6321 if (skip_past_comma (&ptr
) == FAIL
)
6324 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6326 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6327 inst
.operands
[i
].reg
= val
;
6328 inst
.operands
[i
].isscalar
= 1;
6329 inst
.operands
[i
].present
= 1;
6330 inst
.operands
[i
].vectype
= optype
;
6332 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6334 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6335 inst
.operands
[i
].reg
= val
;
6336 inst
.operands
[i
].isreg
= 1;
6337 inst
.operands
[i
++].present
= 1;
6339 if (skip_past_comma (&ptr
) == FAIL
)
6342 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6345 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6349 inst
.operands
[i
].reg
= val
;
6350 inst
.operands
[i
].isreg
= 1;
6351 inst
.operands
[i
].isvec
= 1;
6352 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6353 inst
.operands
[i
].vectype
= optype
;
6354 inst
.operands
[i
].present
= 1;
6356 if (rtype
== REG_TYPE_VFS
)
6360 if (skip_past_comma (&ptr
) == FAIL
)
6362 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6365 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6368 inst
.operands
[i
].reg
= val
;
6369 inst
.operands
[i
].isreg
= 1;
6370 inst
.operands
[i
].isvec
= 1;
6371 inst
.operands
[i
].issingle
= 1;
6372 inst
.operands
[i
].vectype
= optype
;
6373 inst
.operands
[i
].present
= 1;
6376 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6380 inst
.operands
[i
].reg
= val
;
6381 inst
.operands
[i
].isreg
= 1;
6382 inst
.operands
[i
].isvec
= 1;
6383 inst
.operands
[i
].issingle
= 1;
6384 inst
.operands
[i
].vectype
= optype
;
6385 inst
.operands
[i
].present
= 1;
6390 first_error (_("parse error"));
6394 /* Successfully parsed the operands. Update args. */
6400 first_error (_("expected comma"));
6404 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6544 /* Generic instruction operand parser. This does no encoding and no
6545 semantic validation; it merely squirrels values away in the inst
6546 structure. Returns SUCCESS or FAIL depending on whether the
6547 specified grammar matched. */
6549 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6551 unsigned const int *upat
= pattern
;
6552 char *backtrack_pos
= 0;
6553 const char *backtrack_error
= 0;
6554 int i
, val
= 0, backtrack_index
= 0;
6555 enum arm_reg_type rtype
;
6556 parse_operand_result result
;
6557 unsigned int op_parse_code
;
6559 #define po_char_or_fail(chr) \
6562 if (skip_past_char (&str, chr) == FAIL) \
6567 #define po_reg_or_fail(regtype) \
6570 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6571 & inst.operands[i].vectype); \
6574 first_error (_(reg_expected_msgs[regtype])); \
6577 inst.operands[i].reg = val; \
6578 inst.operands[i].isreg = 1; \
6579 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6580 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6581 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6582 || rtype == REG_TYPE_VFD \
6583 || rtype == REG_TYPE_NQ); \
6587 #define po_reg_or_goto(regtype, label) \
6590 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6591 & inst.operands[i].vectype); \
6595 inst.operands[i].reg = val; \
6596 inst.operands[i].isreg = 1; \
6597 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6598 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6599 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6600 || rtype == REG_TYPE_VFD \
6601 || rtype == REG_TYPE_NQ); \
6605 #define po_imm_or_fail(min, max, popt) \
6608 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6610 inst.operands[i].imm = val; \
6614 #define po_scalar_or_goto(elsz, label) \
6617 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6620 inst.operands[i].reg = val; \
6621 inst.operands[i].isscalar = 1; \
6625 #define po_misc_or_fail(expr) \
6633 #define po_misc_or_fail_no_backtrack(expr) \
6637 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6638 backtrack_pos = 0; \
6639 if (result != PARSE_OPERAND_SUCCESS) \
6644 #define po_barrier_or_imm(str) \
6647 val = parse_barrier (&str); \
6648 if (val == FAIL && ! ISALPHA (*str)) \
6651 /* ISB can only take SY as an option. */ \
6652 || ((inst.instruction & 0xf0) == 0x60 \
6655 inst.error = _("invalid barrier type"); \
6656 backtrack_pos = 0; \
6662 skip_whitespace (str
);
6664 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6666 op_parse_code
= upat
[i
];
6667 if (op_parse_code
>= 1<<16)
6668 op_parse_code
= thumb
? (op_parse_code
>> 16)
6669 : (op_parse_code
& ((1<<16)-1));
6671 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6673 /* Remember where we are in case we need to backtrack. */
6674 gas_assert (!backtrack_pos
);
6675 backtrack_pos
= str
;
6676 backtrack_error
= inst
.error
;
6677 backtrack_index
= i
;
6680 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6681 po_char_or_fail (',');
6683 switch (op_parse_code
)
6691 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6692 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6693 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6694 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6695 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6696 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6698 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6700 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6702 /* Also accept generic coprocessor regs for unknown registers. */
6704 po_reg_or_fail (REG_TYPE_CN
);
6706 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6707 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6708 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6709 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6710 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6711 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6712 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6713 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6714 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6715 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6717 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6719 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6720 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6722 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6724 /* Neon scalar. Using an element size of 8 means that some invalid
6725 scalars are accepted here, so deal with those in later code. */
6726 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6730 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6733 po_imm_or_fail (0, 0, TRUE
);
6738 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6743 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6746 if (parse_ifimm_zero (&str
))
6747 inst
.operands
[i
].imm
= 0;
6751 = _("only floating point zero is allowed as immediate value");
6759 po_scalar_or_goto (8, try_rr
);
6762 po_reg_or_fail (REG_TYPE_RN
);
6768 po_scalar_or_goto (8, try_nsdq
);
6771 po_reg_or_fail (REG_TYPE_NSDQ
);
6777 po_scalar_or_goto (8, try_ndq
);
6780 po_reg_or_fail (REG_TYPE_NDQ
);
6786 po_scalar_or_goto (8, try_vfd
);
6789 po_reg_or_fail (REG_TYPE_VFD
);
6794 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6795 not careful then bad things might happen. */
6796 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6801 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6804 /* There's a possibility of getting a 64-bit immediate here, so
6805 we need special handling. */
6806 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6809 inst
.error
= _("immediate value is out of range");
6817 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6820 po_imm_or_fail (0, 63, TRUE
);
6825 po_char_or_fail ('[');
6826 po_reg_or_fail (REG_TYPE_RN
);
6827 po_char_or_fail (']');
6833 po_reg_or_fail (REG_TYPE_RN
);
6834 if (skip_past_char (&str
, '!') == SUCCESS
)
6835 inst
.operands
[i
].writeback
= 1;
6839 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6840 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6841 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6842 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6843 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6844 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6845 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6846 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6847 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6848 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6849 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6850 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6852 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6854 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6855 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6857 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6858 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6859 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
6860 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6862 /* Immediate variants */
6864 po_char_or_fail ('{');
6865 po_imm_or_fail (0, 255, TRUE
);
6866 po_char_or_fail ('}');
6870 /* The expression parser chokes on a trailing !, so we have
6871 to find it first and zap it. */
6874 while (*s
&& *s
!= ',')
6879 inst
.operands
[i
].writeback
= 1;
6881 po_imm_or_fail (0, 31, TRUE
);
6889 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6894 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6899 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6901 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6903 val
= parse_reloc (&str
);
6906 inst
.error
= _("unrecognized relocation suffix");
6909 else if (val
!= BFD_RELOC_UNUSED
)
6911 inst
.operands
[i
].imm
= val
;
6912 inst
.operands
[i
].hasreloc
= 1;
6917 /* Operand for MOVW or MOVT. */
6919 po_misc_or_fail (parse_half (&str
));
6922 /* Register or expression. */
6923 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6924 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6926 /* Register or immediate. */
6927 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6928 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6930 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6932 if (!is_immediate_prefix (*str
))
6935 val
= parse_fpa_immediate (&str
);
6938 /* FPA immediates are encoded as registers 8-15.
6939 parse_fpa_immediate has already applied the offset. */
6940 inst
.operands
[i
].reg
= val
;
6941 inst
.operands
[i
].isreg
= 1;
6944 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6945 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6947 /* Two kinds of register. */
6950 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6952 || (rege
->type
!= REG_TYPE_MMXWR
6953 && rege
->type
!= REG_TYPE_MMXWC
6954 && rege
->type
!= REG_TYPE_MMXWCG
))
6956 inst
.error
= _("iWMMXt data or control register expected");
6959 inst
.operands
[i
].reg
= rege
->number
;
6960 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
6966 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6968 || (rege
->type
!= REG_TYPE_MMXWC
6969 && rege
->type
!= REG_TYPE_MMXWCG
))
6971 inst
.error
= _("iWMMXt control register expected");
6974 inst
.operands
[i
].reg
= rege
->number
;
6975 inst
.operands
[i
].isreg
= 1;
6980 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
6981 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
6982 case OP_oROR
: val
= parse_ror (&str
); break;
6983 case OP_COND
: val
= parse_cond (&str
); break;
6984 case OP_oBARRIER_I15
:
6985 po_barrier_or_imm (str
); break;
6987 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
6993 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
6994 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
6996 inst
.error
= _("Banked registers are not available with this "
7002 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7006 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7009 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7011 if (strncasecmp (str
, "APSR_", 5) == 0)
7018 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7019 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7020 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7021 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7022 default: found
= 16;
7026 inst
.operands
[i
].isvec
= 1;
7027 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7028 inst
.operands
[i
].reg
= REG_PC
;
7035 po_misc_or_fail (parse_tb (&str
));
7038 /* Register lists. */
7040 val
= parse_reg_list (&str
);
7043 inst
.operands
[i
].writeback
= 1;
7049 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7053 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7057 /* Allow Q registers too. */
7058 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7063 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7065 inst
.operands
[i
].issingle
= 1;
7070 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7075 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7076 &inst
.operands
[i
].vectype
);
7079 /* Addressing modes */
7081 po_misc_or_fail (parse_address (&str
, i
));
7085 po_misc_or_fail_no_backtrack (
7086 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7090 po_misc_or_fail_no_backtrack (
7091 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7095 po_misc_or_fail_no_backtrack (
7096 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7100 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7104 po_misc_or_fail_no_backtrack (
7105 parse_shifter_operand_group_reloc (&str
, i
));
7109 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7113 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7117 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7121 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7124 /* Various value-based sanity checks and shared operations. We
7125 do not signal immediate failures for the register constraints;
7126 this allows a syntax error to take precedence. */
7127 switch (op_parse_code
)
7135 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7136 inst
.error
= BAD_PC
;
7141 if (inst
.operands
[i
].isreg
)
7143 if (inst
.operands
[i
].reg
== REG_PC
)
7144 inst
.error
= BAD_PC
;
7145 else if (inst
.operands
[i
].reg
== REG_SP
)
7146 inst
.error
= BAD_SP
;
7151 if (inst
.operands
[i
].isreg
7152 && inst
.operands
[i
].reg
== REG_PC
7153 && (inst
.operands
[i
].writeback
|| thumb
))
7154 inst
.error
= BAD_PC
;
7163 case OP_oBARRIER_I15
:
7172 inst
.operands
[i
].imm
= val
;
7179 /* If we get here, this operand was successfully parsed. */
7180 inst
.operands
[i
].present
= 1;
7184 inst
.error
= BAD_ARGS
;
7189 /* The parse routine should already have set inst.error, but set a
7190 default here just in case. */
7192 inst
.error
= _("syntax error");
7196 /* Do not backtrack over a trailing optional argument that
7197 absorbed some text. We will only fail again, with the
7198 'garbage following instruction' error message, which is
7199 probably less helpful than the current one. */
7200 if (backtrack_index
== i
&& backtrack_pos
!= str
7201 && upat
[i
+1] == OP_stop
)
7204 inst
.error
= _("syntax error");
7208 /* Try again, skipping the optional argument at backtrack_pos. */
7209 str
= backtrack_pos
;
7210 inst
.error
= backtrack_error
;
7211 inst
.operands
[backtrack_index
].present
= 0;
7212 i
= backtrack_index
;
7216 /* Check that we have parsed all the arguments. */
7217 if (*str
!= '\0' && !inst
.error
)
7218 inst
.error
= _("garbage following instruction");
7220 return inst
.error
? FAIL
: SUCCESS
;
7223 #undef po_char_or_fail
7224 #undef po_reg_or_fail
7225 #undef po_reg_or_goto
7226 #undef po_imm_or_fail
7227 #undef po_scalar_or_fail
7228 #undef po_barrier_or_imm
7230 /* Shorthand macro for instruction encoding functions issuing errors. */
7231 #define constraint(expr, err) \
7242 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7243 instructions are unpredictable if these registers are used. This
7244 is the BadReg predicate in ARM's Thumb-2 documentation. */
7245 #define reject_bad_reg(reg) \
7247 if (reg == REG_SP || reg == REG_PC) \
7249 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
7254 /* If REG is R13 (the stack pointer), warn that its use is
7256 #define warn_deprecated_sp(reg) \
7258 if (warn_on_deprecated && reg == REG_SP) \
7259 as_tsktsk (_("use of r13 is deprecated")); \
7262 /* Functions for operand encoding. ARM, then Thumb. */
7264 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7266 /* If VAL can be encoded in the immediate field of an ARM instruction,
7267 return the encoded form. Otherwise, return FAIL. */
7270 encode_arm_immediate (unsigned int val
)
7277 for (i
= 2; i
< 32; i
+= 2)
7278 if ((a
= rotate_left (val
, i
)) <= 0xff)
7279 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7284 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7285 return the encoded form. Otherwise, return FAIL. */
7287 encode_thumb32_immediate (unsigned int val
)
7294 for (i
= 1; i
<= 24; i
++)
7297 if ((val
& ~(0xff << i
)) == 0)
7298 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7302 if (val
== ((a
<< 16) | a
))
7304 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7308 if (val
== ((a
<< 16) | a
))
7309 return 0x200 | (a
>> 8);
7313 /* Encode a VFP SP or DP register number into inst.instruction. */
7316 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7318 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7321 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7324 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7327 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7332 first_error (_("D register out of range for selected VFP version"));
7340 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7344 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7348 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7352 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7356 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7360 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7368 /* Encode a <shift> in an ARM-format instruction. The immediate,
7369 if any, is handled by md_apply_fix. */
7371 encode_arm_shift (int i
)
7373 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7374 inst
.instruction
|= SHIFT_ROR
<< 5;
7377 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7378 if (inst
.operands
[i
].immisreg
)
7380 inst
.instruction
|= SHIFT_BY_REG
;
7381 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7384 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7389 encode_arm_shifter_operand (int i
)
7391 if (inst
.operands
[i
].isreg
)
7393 inst
.instruction
|= inst
.operands
[i
].reg
;
7394 encode_arm_shift (i
);
7398 inst
.instruction
|= INST_IMMEDIATE
;
7399 if (inst
.reloc
.type
!= BFD_RELOC_ARM_IMMEDIATE
)
7400 inst
.instruction
|= inst
.operands
[i
].imm
;
7404 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7406 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7409 Generate an error if the operand is not a register. */
7410 constraint (!inst
.operands
[i
].isreg
,
7411 _("Instruction does not support =N addresses"));
7413 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7415 if (inst
.operands
[i
].preind
)
7419 inst
.error
= _("instruction does not accept preindexed addressing");
7422 inst
.instruction
|= PRE_INDEX
;
7423 if (inst
.operands
[i
].writeback
)
7424 inst
.instruction
|= WRITE_BACK
;
7427 else if (inst
.operands
[i
].postind
)
7429 gas_assert (inst
.operands
[i
].writeback
);
7431 inst
.instruction
|= WRITE_BACK
;
7433 else /* unindexed - only for coprocessor */
7435 inst
.error
= _("instruction does not accept unindexed addressing");
7439 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7440 && (((inst
.instruction
& 0x000f0000) >> 16)
7441 == ((inst
.instruction
& 0x0000f000) >> 12)))
7442 as_warn ((inst
.instruction
& LOAD_BIT
)
7443 ? _("destination register same as write-back base")
7444 : _("source register same as write-back base"));
7447 /* inst.operands[i] was set up by parse_address. Encode it into an
7448 ARM-format mode 2 load or store instruction. If is_t is true,
7449 reject forms that cannot be used with a T instruction (i.e. not
7452 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7454 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7456 encode_arm_addr_mode_common (i
, is_t
);
7458 if (inst
.operands
[i
].immisreg
)
7460 constraint ((inst
.operands
[i
].imm
== REG_PC
7461 || (is_pc
&& inst
.operands
[i
].writeback
)),
7463 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7464 inst
.instruction
|= inst
.operands
[i
].imm
;
7465 if (!inst
.operands
[i
].negative
)
7466 inst
.instruction
|= INDEX_UP
;
7467 if (inst
.operands
[i
].shifted
)
7469 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7470 inst
.instruction
|= SHIFT_ROR
<< 5;
7473 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7474 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7478 else /* immediate offset in inst.reloc */
7480 if (is_pc
&& !inst
.reloc
.pc_rel
)
7482 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7484 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7485 cannot use PC in addressing.
7486 PC cannot be used in writeback addressing, either. */
7487 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7490 /* Use of PC in str is deprecated for ARMv7. */
7491 if (warn_on_deprecated
7493 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7494 as_tsktsk (_("use of PC in this instruction is deprecated"));
7497 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7499 /* Prefer + for zero encoded value. */
7500 if (!inst
.operands
[i
].negative
)
7501 inst
.instruction
|= INDEX_UP
;
7502 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
7507 /* inst.operands[i] was set up by parse_address. Encode it into an
7508 ARM-format mode 3 load or store instruction. Reject forms that
7509 cannot be used with such instructions. If is_t is true, reject
7510 forms that cannot be used with a T instruction (i.e. not
7513 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7515 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7517 inst
.error
= _("instruction does not accept scaled register index");
7521 encode_arm_addr_mode_common (i
, is_t
);
7523 if (inst
.operands
[i
].immisreg
)
7525 constraint ((inst
.operands
[i
].imm
== REG_PC
7526 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7528 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7530 inst
.instruction
|= inst
.operands
[i
].imm
;
7531 if (!inst
.operands
[i
].negative
)
7532 inst
.instruction
|= INDEX_UP
;
7534 else /* immediate offset in inst.reloc */
7536 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
7537 && inst
.operands
[i
].writeback
),
7539 inst
.instruction
|= HWOFFSET_IMM
;
7540 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7542 /* Prefer + for zero encoded value. */
7543 if (!inst
.operands
[i
].negative
)
7544 inst
.instruction
|= INDEX_UP
;
7546 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7551 /* Write immediate bits [7:0] to the following locations:
7553 |28/24|23 19|18 16|15 4|3 0|
7554 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7556 This function is used by VMOV/VMVN/VORR/VBIC. */
7559 neon_write_immbits (unsigned immbits
)
7561 inst
.instruction
|= immbits
& 0xf;
7562 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7563 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
7566 /* Invert low-order SIZE bits of XHI:XLO. */
7569 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
7571 unsigned immlo
= xlo
? *xlo
: 0;
7572 unsigned immhi
= xhi
? *xhi
: 0;
7577 immlo
= (~immlo
) & 0xff;
7581 immlo
= (~immlo
) & 0xffff;
7585 immhi
= (~immhi
) & 0xffffffff;
7589 immlo
= (~immlo
) & 0xffffffff;
7603 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7607 neon_bits_same_in_bytes (unsigned imm
)
7609 return ((imm
& 0x000000ff) == 0 || (imm
& 0x000000ff) == 0x000000ff)
7610 && ((imm
& 0x0000ff00) == 0 || (imm
& 0x0000ff00) == 0x0000ff00)
7611 && ((imm
& 0x00ff0000) == 0 || (imm
& 0x00ff0000) == 0x00ff0000)
7612 && ((imm
& 0xff000000) == 0 || (imm
& 0xff000000) == 0xff000000);
7615 /* For immediate of above form, return 0bABCD. */
7618 neon_squash_bits (unsigned imm
)
7620 return (imm
& 0x01) | ((imm
& 0x0100) >> 7) | ((imm
& 0x010000) >> 14)
7621 | ((imm
& 0x01000000) >> 21);
7624 /* Compress quarter-float representation to 0b...000 abcdefgh. */
7627 neon_qfloat_bits (unsigned imm
)
7629 return ((imm
>> 19) & 0x7f) | ((imm
>> 24) & 0x80);
7632 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7633 the instruction. *OP is passed as the initial value of the op field, and
7634 may be set to a different value depending on the constant (i.e.
7635 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7636 MVN). If the immediate looks like a repeated pattern then also
7637 try smaller element sizes. */
7640 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7641 unsigned *immbits
, int *op
, int size
,
7642 enum neon_el_type type
)
7644 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7646 if (type
== NT_float
&& !float_p
)
7649 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7651 if (size
!= 32 || *op
== 1)
7653 *immbits
= neon_qfloat_bits (immlo
);
7659 if (neon_bits_same_in_bytes (immhi
)
7660 && neon_bits_same_in_bytes (immlo
))
7664 *immbits
= (neon_squash_bits (immhi
) << 4)
7665 | neon_squash_bits (immlo
);
7676 if (immlo
== (immlo
& 0x000000ff))
7681 else if (immlo
== (immlo
& 0x0000ff00))
7683 *immbits
= immlo
>> 8;
7686 else if (immlo
== (immlo
& 0x00ff0000))
7688 *immbits
= immlo
>> 16;
7691 else if (immlo
== (immlo
& 0xff000000))
7693 *immbits
= immlo
>> 24;
7696 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7698 *immbits
= (immlo
>> 8) & 0xff;
7701 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7703 *immbits
= (immlo
>> 16) & 0xff;
7707 if ((immlo
& 0xffff) != (immlo
>> 16))
7714 if (immlo
== (immlo
& 0x000000ff))
7719 else if (immlo
== (immlo
& 0x0000ff00))
7721 *immbits
= immlo
>> 8;
7725 if ((immlo
& 0xff) != (immlo
>> 8))
7730 if (immlo
== (immlo
& 0x000000ff))
7732 /* Don't allow MVN with 8-bit immediate. */
7742 #if defined BFD_HOST_64_BIT
7743 /* Returns TRUE if double precision value V may be cast
7744 to single precision without loss of accuracy. */
7747 is_double_a_single (bfd_int64_t v
)
7749 int exp
= (int)((v
>> 52) & 0x7FF);
7750 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7752 return (exp
== 0 || exp
== 0x7FF
7753 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
7754 && (mantissa
& 0x1FFFFFFFl
) == 0;
7757 /* Returns a double precision value casted to single precision
7758 (ignoring the least significant bits in exponent and mantissa). */
7761 double_to_single (bfd_int64_t v
)
7763 int sign
= (int) ((v
>> 63) & 1l);
7764 int exp
= (int) ((v
>> 52) & 0x7FF);
7765 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7771 exp
= exp
- 1023 + 127;
7780 /* No denormalized numbers. */
7786 return (sign
<< 31) | (exp
<< 23) | mantissa
;
7788 #endif /* BFD_HOST_64_BIT */
7797 static void do_vfp_nsyn_opcode (const char *);
7799 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7800 Determine whether it can be performed with a move instruction; if
7801 it can, convert inst.instruction to that move instruction and
7802 return TRUE; if it can't, convert inst.instruction to a literal-pool
7803 load and return FALSE. If this is not a valid thing to do in the
7804 current context, set inst.error and return TRUE.
7806 inst.operands[i] describes the destination register. */
7809 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
7812 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
7813 bfd_boolean arm_p
= (t
== CONST_ARM
);
7816 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
7820 if ((inst
.instruction
& tbit
) == 0)
7822 inst
.error
= _("invalid pseudo operation");
7826 if (inst
.reloc
.exp
.X_op
!= O_constant
7827 && inst
.reloc
.exp
.X_op
!= O_symbol
7828 && inst
.reloc
.exp
.X_op
!= O_big
)
7830 inst
.error
= _("constant expression expected");
7834 if (inst
.reloc
.exp
.X_op
== O_constant
7835 || inst
.reloc
.exp
.X_op
== O_big
)
7837 #if defined BFD_HOST_64_BIT
7842 if (inst
.reloc
.exp
.X_op
== O_big
)
7844 LITTLENUM_TYPE w
[X_PRECISION
];
7847 if (inst
.reloc
.exp
.X_add_number
== -1)
7849 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
7851 /* FIXME: Should we check words w[2..5] ? */
7856 #if defined BFD_HOST_64_BIT
7858 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
7859 << LITTLENUM_NUMBER_OF_BITS
)
7860 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
7861 << LITTLENUM_NUMBER_OF_BITS
)
7862 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
7863 << LITTLENUM_NUMBER_OF_BITS
)
7864 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
7866 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
7867 | (l
[0] & LITTLENUM_MASK
);
7871 v
= inst
.reloc
.exp
.X_add_number
;
7873 if (!inst
.operands
[i
].issingle
)
7877 /* This can be encoded only for a low register. */
7878 if ((v
& ~0xFF) == 0 && (inst
.operands
[i
].reg
< 8))
7880 /* This can be done with a mov(1) instruction. */
7881 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
7882 inst
.instruction
|= v
;
7886 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
7887 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7889 /* Check if on thumb2 it can be done with a mov.w, mvn or
7890 movw instruction. */
7891 unsigned int newimm
;
7892 bfd_boolean isNegated
;
7894 newimm
= encode_thumb32_immediate (v
);
7895 if (newimm
!= (unsigned int) FAIL
)
7899 newimm
= encode_thumb32_immediate (~v
);
7900 if (newimm
!= (unsigned int) FAIL
)
7904 /* The number can be loaded with a mov.w or mvn
7906 if (newimm
!= (unsigned int) FAIL
7907 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
7909 inst
.instruction
= (0xf04f0000 /* MOV.W. */
7910 | (inst
.operands
[i
].reg
<< 8));
7911 /* Change to MOVN. */
7912 inst
.instruction
|= (isNegated
? 0x200000 : 0);
7913 inst
.instruction
|= (newimm
& 0x800) << 15;
7914 inst
.instruction
|= (newimm
& 0x700) << 4;
7915 inst
.instruction
|= (newimm
& 0x0ff);
7918 /* The number can be loaded with a movw instruction. */
7919 else if ((v
& ~0xFFFF) == 0
7920 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7922 int imm
= v
& 0xFFFF;
7924 inst
.instruction
= 0xf2400000; /* MOVW. */
7925 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
7926 inst
.instruction
|= (imm
& 0xf000) << 4;
7927 inst
.instruction
|= (imm
& 0x0800) << 15;
7928 inst
.instruction
|= (imm
& 0x0700) << 4;
7929 inst
.instruction
|= (imm
& 0x00ff);
7936 int value
= encode_arm_immediate (v
);
7940 /* This can be done with a mov instruction. */
7941 inst
.instruction
&= LITERAL_MASK
;
7942 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
7943 inst
.instruction
|= value
& 0xfff;
7947 value
= encode_arm_immediate (~ v
);
7950 /* This can be done with a mvn instruction. */
7951 inst
.instruction
&= LITERAL_MASK
;
7952 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
7953 inst
.instruction
|= value
& 0xfff;
7957 else if (t
== CONST_VEC
)
7960 unsigned immbits
= 0;
7961 unsigned immlo
= inst
.operands
[1].imm
;
7962 unsigned immhi
= inst
.operands
[1].regisimm
7963 ? inst
.operands
[1].reg
7964 : inst
.reloc
.exp
.X_unsigned
7966 : ((bfd_int64_t
)((int) immlo
)) >> 32;
7967 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7968 &op
, 64, NT_invtype
);
7972 neon_invert_size (&immlo
, &immhi
, 64);
7974 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7975 &op
, 64, NT_invtype
);
7980 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
7986 /* Fill other bits in vmov encoding for both thumb and arm. */
7988 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
7990 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
7991 neon_write_immbits (immbits
);
7999 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8000 if (inst
.operands
[i
].issingle
8001 && is_quarter_float (inst
.operands
[1].imm
)
8002 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8004 inst
.operands
[1].imm
=
8005 neon_qfloat_bits (v
);
8006 do_vfp_nsyn_opcode ("fconsts");
8010 /* If our host does not support a 64-bit type then we cannot perform
8011 the following optimization. This mean that there will be a
8012 discrepancy between the output produced by an assembler built for
8013 a 32-bit-only host and the output produced from a 64-bit host, but
8014 this cannot be helped. */
8015 #if defined BFD_HOST_64_BIT
8016 else if (!inst
.operands
[1].issingle
8017 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8019 if (is_double_a_single (v
)
8020 && is_quarter_float (double_to_single (v
)))
8022 inst
.operands
[1].imm
=
8023 neon_qfloat_bits (double_to_single (v
));
8024 do_vfp_nsyn_opcode ("fconstd");
8032 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8033 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8036 inst
.operands
[1].reg
= REG_PC
;
8037 inst
.operands
[1].isreg
= 1;
8038 inst
.operands
[1].preind
= 1;
8039 inst
.reloc
.pc_rel
= 1;
8040 inst
.reloc
.type
= (thumb_p
8041 ? BFD_RELOC_ARM_THUMB_OFFSET
8043 ? BFD_RELOC_ARM_HWLITERAL
8044 : BFD_RELOC_ARM_LITERAL
));
8048 /* inst.operands[i] was set up by parse_address. Encode it into an
8049 ARM-format instruction. Reject all forms which cannot be encoded
8050 into a coprocessor load/store instruction. If wb_ok is false,
8051 reject use of writeback; if unind_ok is false, reject use of
8052 unindexed addressing. If reloc_override is not 0, use it instead
8053 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8054 (in which case it is preserved). */
8057 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8059 if (!inst
.operands
[i
].isreg
)
8062 if (! inst
.operands
[0].isvec
)
8064 inst
.error
= _("invalid co-processor operand");
8067 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8071 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8073 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8075 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8077 gas_assert (!inst
.operands
[i
].writeback
);
8080 inst
.error
= _("instruction does not support unindexed addressing");
8083 inst
.instruction
|= inst
.operands
[i
].imm
;
8084 inst
.instruction
|= INDEX_UP
;
8088 if (inst
.operands
[i
].preind
)
8089 inst
.instruction
|= PRE_INDEX
;
8091 if (inst
.operands
[i
].writeback
)
8093 if (inst
.operands
[i
].reg
== REG_PC
)
8095 inst
.error
= _("pc may not be used with write-back");
8100 inst
.error
= _("instruction does not support writeback");
8103 inst
.instruction
|= WRITE_BACK
;
8107 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
8108 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8109 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
8110 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8113 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8115 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8118 /* Prefer + for zero encoded value. */
8119 if (!inst
.operands
[i
].negative
)
8120 inst
.instruction
|= INDEX_UP
;
8125 /* Functions for instruction encoding, sorted by sub-architecture.
8126 First some generics; their names are taken from the conventional
8127 bit positions for register arguments in ARM format instructions. */
8137 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8143 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8144 inst
.instruction
|= inst
.operands
[1].reg
;
8150 inst
.instruction
|= inst
.operands
[0].reg
;
8151 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8157 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8158 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8164 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8165 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8171 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8172 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8176 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8178 if (ARM_CPU_IS_ANY (cpu_variant
))
8180 as_tsktsk ("%s", msg
);
8183 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8195 unsigned Rn
= inst
.operands
[2].reg
;
8196 /* Enforce restrictions on SWP instruction. */
8197 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8199 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8200 _("Rn must not overlap other operands"));
8202 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8204 if (!check_obsolete (&arm_ext_v8
,
8205 _("swp{b} use is obsoleted for ARMv8 and later"))
8206 && warn_on_deprecated
8207 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8208 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8211 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8212 inst
.instruction
|= inst
.operands
[1].reg
;
8213 inst
.instruction
|= Rn
<< 16;
8219 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8220 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8221 inst
.instruction
|= inst
.operands
[2].reg
;
8227 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8228 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
8229 && inst
.reloc
.exp
.X_op
!= O_illegal
)
8230 || inst
.reloc
.exp
.X_add_number
!= 0),
8232 inst
.instruction
|= inst
.operands
[0].reg
;
8233 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8234 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8240 inst
.instruction
|= inst
.operands
[0].imm
;
8246 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8247 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8250 /* ARM instructions, in alphabetical order by function name (except
8251 that wrapper functions appear immediately after the function they
8254 /* This is a pseudo-op of the form "adr rd, label" to be converted
8255 into a relative address of the form "add rd, pc, #label-.-8". */
8260 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8262 /* Frag hacking will turn this into a sub instruction if the offset turns
8263 out to be negative. */
8264 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8265 inst
.reloc
.pc_rel
= 1;
8266 inst
.reloc
.exp
.X_add_number
-= 8;
8269 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8270 into a relative address of the form:
8271 add rd, pc, #low(label-.-8)"
8272 add rd, rd, #high(label-.-8)" */
8277 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8279 /* Frag hacking will turn this into a sub instruction if the offset turns
8280 out to be negative. */
8281 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8282 inst
.reloc
.pc_rel
= 1;
8283 inst
.size
= INSN_SIZE
* 2;
8284 inst
.reloc
.exp
.X_add_number
-= 8;
8290 if (!inst
.operands
[1].present
)
8291 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8292 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8293 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8294 encode_arm_shifter_operand (2);
8300 if (inst
.operands
[0].present
)
8301 inst
.instruction
|= inst
.operands
[0].imm
;
8303 inst
.instruction
|= 0xf;
8309 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8310 constraint (msb
> 32, _("bit-field extends past end of register"));
8311 /* The instruction encoding stores the LSB and MSB,
8312 not the LSB and width. */
8313 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8314 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8315 inst
.instruction
|= (msb
- 1) << 16;
8323 /* #0 in second position is alternative syntax for bfc, which is
8324 the same instruction but with REG_PC in the Rm field. */
8325 if (!inst
.operands
[1].isreg
)
8326 inst
.operands
[1].reg
= REG_PC
;
8328 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8329 constraint (msb
> 32, _("bit-field extends past end of register"));
8330 /* The instruction encoding stores the LSB and MSB,
8331 not the LSB and width. */
8332 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8333 inst
.instruction
|= inst
.operands
[1].reg
;
8334 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8335 inst
.instruction
|= (msb
- 1) << 16;
8341 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8342 _("bit-field extends past end of register"));
8343 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8344 inst
.instruction
|= inst
.operands
[1].reg
;
8345 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8346 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8349 /* ARM V5 breakpoint instruction (argument parse)
8350 BKPT <16 bit unsigned immediate>
8351 Instruction is not conditional.
8352 The bit pattern given in insns[] has the COND_ALWAYS condition,
8353 and it is an error if the caller tried to override that. */
8358 /* Top 12 of 16 bits to bits 19:8. */
8359 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8361 /* Bottom 4 of 16 bits to bits 3:0. */
8362 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8366 encode_branch (int default_reloc
)
8368 if (inst
.operands
[0].hasreloc
)
8370 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8371 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8372 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8373 inst
.reloc
.type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8374 ? BFD_RELOC_ARM_PLT32
8375 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8378 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
8379 inst
.reloc
.pc_rel
= 1;
8386 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8387 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8390 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8397 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8399 if (inst
.cond
== COND_ALWAYS
)
8400 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8402 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8406 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8409 /* ARM V5 branch-link-exchange instruction (argument parse)
8410 BLX <target_addr> ie BLX(1)
8411 BLX{<condition>} <Rm> ie BLX(2)
8412 Unfortunately, there are two different opcodes for this mnemonic.
8413 So, the insns[].value is not used, and the code here zaps values
8414 into inst.instruction.
8415 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8420 if (inst
.operands
[0].isreg
)
8422 /* Arg is a register; the opcode provided by insns[] is correct.
8423 It is not illegal to do "blx pc", just useless. */
8424 if (inst
.operands
[0].reg
== REG_PC
)
8425 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8427 inst
.instruction
|= inst
.operands
[0].reg
;
8431 /* Arg is an address; this instruction cannot be executed
8432 conditionally, and the opcode must be adjusted.
8433 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8434 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8435 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8436 inst
.instruction
= 0xfa000000;
8437 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8444 bfd_boolean want_reloc
;
8446 if (inst
.operands
[0].reg
== REG_PC
)
8447 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8449 inst
.instruction
|= inst
.operands
[0].reg
;
8450 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8451 it is for ARMv4t or earlier. */
8452 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8453 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
8457 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8462 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
8466 /* ARM v5TEJ. Jump to Jazelle code. */
8471 if (inst
.operands
[0].reg
== REG_PC
)
8472 as_tsktsk (_("use of r15 in bxj is not really useful"));
8474 inst
.instruction
|= inst
.operands
[0].reg
;
8477 /* Co-processor data operation:
8478 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8479 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8483 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8484 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8485 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8486 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8487 inst
.instruction
|= inst
.operands
[4].reg
;
8488 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8494 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8495 encode_arm_shifter_operand (1);
8498 /* Transfer between coprocessor and ARM registers.
8499 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8504 No special properties. */
8506 struct deprecated_coproc_regs_s
8513 arm_feature_set deprecated
;
8514 arm_feature_set obsoleted
;
8515 const char *dep_msg
;
8516 const char *obs_msg
;
8519 #define DEPR_ACCESS_V8 \
8520 N_("This coprocessor register access is deprecated in ARMv8")
8522 /* Table of all deprecated coprocessor registers. */
8523 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8525 {15, 0, 7, 10, 5, /* CP15DMB. */
8526 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8527 DEPR_ACCESS_V8
, NULL
},
8528 {15, 0, 7, 10, 4, /* CP15DSB. */
8529 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8530 DEPR_ACCESS_V8
, NULL
},
8531 {15, 0, 7, 5, 4, /* CP15ISB. */
8532 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8533 DEPR_ACCESS_V8
, NULL
},
8534 {14, 6, 1, 0, 0, /* TEEHBR. */
8535 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8536 DEPR_ACCESS_V8
, NULL
},
8537 {14, 6, 0, 0, 0, /* TEECR. */
8538 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8539 DEPR_ACCESS_V8
, NULL
},
8542 #undef DEPR_ACCESS_V8
8544 static const size_t deprecated_coproc_reg_count
=
8545 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8553 Rd
= inst
.operands
[2].reg
;
8556 if (inst
.instruction
== 0xee000010
8557 || inst
.instruction
== 0xfe000010)
8559 reject_bad_reg (Rd
);
8562 constraint (Rd
== REG_SP
, BAD_SP
);
8567 if (inst
.instruction
== 0xe000010)
8568 constraint (Rd
== REG_PC
, BAD_PC
);
8571 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8573 const struct deprecated_coproc_regs_s
*r
=
8574 deprecated_coproc_regs
+ i
;
8576 if (inst
.operands
[0].reg
== r
->cp
8577 && inst
.operands
[1].imm
== r
->opc1
8578 && inst
.operands
[3].reg
== r
->crn
8579 && inst
.operands
[4].reg
== r
->crm
8580 && inst
.operands
[5].imm
== r
->opc2
)
8582 if (! ARM_CPU_IS_ANY (cpu_variant
)
8583 && warn_on_deprecated
8584 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8585 as_tsktsk ("%s", r
->dep_msg
);
8589 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8590 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8591 inst
.instruction
|= Rd
<< 12;
8592 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8593 inst
.instruction
|= inst
.operands
[4].reg
;
8594 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8597 /* Transfer between coprocessor register and pair of ARM registers.
8598 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8603 Two XScale instructions are special cases of these:
8605 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8606 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8608 Result unpredictable if Rd or Rn is R15. */
8615 Rd
= inst
.operands
[2].reg
;
8616 Rn
= inst
.operands
[3].reg
;
8620 reject_bad_reg (Rd
);
8621 reject_bad_reg (Rn
);
8625 constraint (Rd
== REG_PC
, BAD_PC
);
8626 constraint (Rn
== REG_PC
, BAD_PC
);
8629 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8630 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
8631 inst
.instruction
|= Rd
<< 12;
8632 inst
.instruction
|= Rn
<< 16;
8633 inst
.instruction
|= inst
.operands
[4].reg
;
8639 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8640 if (inst
.operands
[1].present
)
8642 inst
.instruction
|= CPSI_MMOD
;
8643 inst
.instruction
|= inst
.operands
[1].imm
;
8650 inst
.instruction
|= inst
.operands
[0].imm
;
8656 unsigned Rd
, Rn
, Rm
;
8658 Rd
= inst
.operands
[0].reg
;
8659 Rn
= (inst
.operands
[1].present
8660 ? inst
.operands
[1].reg
: Rd
);
8661 Rm
= inst
.operands
[2].reg
;
8663 constraint ((Rd
== REG_PC
), BAD_PC
);
8664 constraint ((Rn
== REG_PC
), BAD_PC
);
8665 constraint ((Rm
== REG_PC
), BAD_PC
);
8667 inst
.instruction
|= Rd
<< 16;
8668 inst
.instruction
|= Rn
<< 0;
8669 inst
.instruction
|= Rm
<< 8;
8675 /* There is no IT instruction in ARM mode. We
8676 process it to do the validation as if in
8677 thumb mode, just in case the code gets
8678 assembled for thumb using the unified syntax. */
8683 set_it_insn_type (IT_INSN
);
8684 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
8685 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* For an empty register list ffs returns 0, giving i == -1; guard it
     explicitly, since evaluating (1 << -1) would be undefined behaviour.  */
  if (i < 0 || i > 15)
    return -1;

  /* Exactly one bit set iff the range equals that single bit.  */
  return (range == (1 << i)) ? i : -1;
}
8699 encode_ldmstm(int from_push_pop_mnem
)
8701 int base_reg
= inst
.operands
[0].reg
;
8702 int range
= inst
.operands
[1].imm
;
8705 inst
.instruction
|= base_reg
<< 16;
8706 inst
.instruction
|= range
;
8708 if (inst
.operands
[1].writeback
)
8709 inst
.instruction
|= LDM_TYPE_2_OR_3
;
8711 if (inst
.operands
[0].writeback
)
8713 inst
.instruction
|= WRITE_BACK
;
8714 /* Check for unpredictable uses of writeback. */
8715 if (inst
.instruction
& LOAD_BIT
)
8717 /* Not allowed in LDM type 2. */
8718 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
8719 && ((range
& (1 << REG_PC
)) == 0))
8720 as_warn (_("writeback of base register is UNPREDICTABLE"));
8721 /* Only allowed if base reg not in list for other types. */
8722 else if (range
& (1 << base_reg
))
8723 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8727 /* Not allowed for type 2. */
8728 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
8729 as_warn (_("writeback of base register is UNPREDICTABLE"));
8730 /* Only allowed if base reg not in list, or first in list. */
8731 else if ((range
& (1 << base_reg
))
8732 && (range
& ((1 << base_reg
) - 1)))
8733 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8737 /* If PUSH/POP has only one register, then use the A2 encoding. */
8738 one_reg
= only_one_reg_in_list (range
);
8739 if (from_push_pop_mnem
&& one_reg
>= 0)
8741 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
8743 inst
.instruction
&= A_COND_MASK
;
8744 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
8745 inst
.instruction
|= one_reg
<< 12;
8752 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
8755 /* ARMv5TE load-consecutive (argument parse)
8764 constraint (inst
.operands
[0].reg
% 2 != 0,
8765 _("first transfer register must be even"));
8766 constraint (inst
.operands
[1].present
8767 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8768 _("can only transfer two consecutive registers"));
8769 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8770 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
8772 if (!inst
.operands
[1].present
)
8773 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8775 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8776 register and the first register written; we have to diagnose
8777 overlap between the base and the second register written here. */
8779 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
8780 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
8781 as_warn (_("base register written back, and overlaps "
8782 "second transfer register"));
8784 if (!(inst
.instruction
& V4_STR_BIT
))
8786 /* For an index-register load, the index register must not overlap the
8787 destination (even if not write-back). */
8788 if (inst
.operands
[2].immisreg
8789 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
8790 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
8791 as_warn (_("index register overlaps transfer register"));
8793 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8794 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
8800 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8801 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8802 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8803 || inst
.operands
[1].negative
8804 /* This can arise if the programmer has written
8806 or if they have mistakenly used a register name as the last
8809 It is very difficult to distinguish between these two cases
8810 because "rX" might actually be a label. ie the register
8811 name has been occluded by a symbol of the same name. So we
8812 just generate a general 'bad addressing mode' type error
8813 message and leave it up to the programmer to discover the
8814 true cause and fix their mistake. */
8815 || (inst
.operands
[1].reg
== REG_PC
),
8818 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8819 || inst
.reloc
.exp
.X_add_number
!= 0,
8820 _("offset must be zero in ARM encoding"));
8822 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
8824 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8825 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8826 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8832 constraint (inst
.operands
[0].reg
% 2 != 0,
8833 _("even register required"));
8834 constraint (inst
.operands
[1].present
8835 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8836 _("can only load two consecutive registers"));
8837 /* If op 1 were present and equal to PC, this function wouldn't
8838 have been called in the first place. */
8839 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8841 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8842 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8845 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8846 which is not a multiple of four is UNPREDICTABLE. */
8848 check_ldr_r15_aligned (void)
8850 constraint (!(inst
.operands
[1].immisreg
)
8851 && (inst
.operands
[0].reg
== REG_PC
8852 && inst
.operands
[1].reg
== REG_PC
8853 && (inst
.reloc
.exp
.X_add_number
& 0x3)),
8854 _("ldr to register 15 must be 4-byte alligned"));
8860 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8861 if (!inst
.operands
[1].isreg
)
8862 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
8864 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
8865 check_ldr_r15_aligned ();
8871 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8873 if (inst
.operands
[1].preind
)
8875 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8876 || inst
.reloc
.exp
.X_add_number
!= 0,
8877 _("this instruction requires a post-indexed address"));
8879 inst
.operands
[1].preind
= 0;
8880 inst
.operands
[1].postind
= 1;
8881 inst
.operands
[1].writeback
= 1;
8883 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8884 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
8887 /* Halfword and signed-byte load/store operations. */
8892 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
8893 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8894 if (!inst
.operands
[1].isreg
)
8895 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
8897 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
8903 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8905 if (inst
.operands
[1].preind
)
8907 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8908 || inst
.reloc
.exp
.X_add_number
!= 0,
8909 _("this instruction requires a post-indexed address"));
8911 inst
.operands
[1].preind
= 0;
8912 inst
.operands
[1].postind
= 1;
8913 inst
.operands
[1].writeback
= 1;
8915 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8916 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
8919 /* Co-processor register load/store.
8920 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8924 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8925 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8926 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
8932 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8933 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
8934 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
8935 && !(inst
.instruction
& 0x00400000))
8936 as_tsktsk (_("Rd and Rm should be different in mla"));
8938 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8939 inst
.instruction
|= inst
.operands
[1].reg
;
8940 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8941 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8947 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8948 encode_arm_shifter_operand (1);
8951 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8958 top
= (inst
.instruction
& 0x00400000) != 0;
8959 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
8960 _(":lower16: not allowed this instruction"));
8961 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
8962 _(":upper16: not allowed instruction"));
8963 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8964 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
8966 imm
= inst
.reloc
.exp
.X_add_number
;
8967 /* The value is in two pieces: 0:11, 16:19. */
8968 inst
.instruction
|= (imm
& 0x00000fff);
8969 inst
.instruction
|= (imm
& 0x0000f000) << 4;
8974 do_vfp_nsyn_mrs (void)
8976 if (inst
.operands
[0].isvec
)
8978 if (inst
.operands
[1].reg
!= 1)
8979 first_error (_("operand 1 must be FPSCR"));
8980 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
8981 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
8982 do_vfp_nsyn_opcode ("fmstat");
8984 else if (inst
.operands
[1].isvec
)
8985 do_vfp_nsyn_opcode ("fmrx");
8993 do_vfp_nsyn_msr (void)
8995 if (inst
.operands
[0].isvec
)
8996 do_vfp_nsyn_opcode ("fmxr");
9006 unsigned Rt
= inst
.operands
[0].reg
;
9008 if (thumb_mode
&& Rt
== REG_SP
)
9010 inst
.error
= BAD_SP
;
9014 /* APSR_ sets isvec. All other refs to PC are illegal. */
9015 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9017 inst
.error
= BAD_PC
;
9021 /* If we get through parsing the register name, we just insert the number
9022 generated into the instruction without further validation. */
9023 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9024 inst
.instruction
|= (Rt
<< 12);
9030 unsigned Rt
= inst
.operands
[1].reg
;
9033 reject_bad_reg (Rt
);
9034 else if (Rt
== REG_PC
)
9036 inst
.error
= BAD_PC
;
9040 /* If we get through parsing the register name, we just insert the number
9041 generated into the instruction without further validation. */
9042 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9043 inst
.instruction
|= (Rt
<< 12);
9051 if (do_vfp_nsyn_mrs () == SUCCESS
)
9054 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9055 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9057 if (inst
.operands
[1].isreg
)
9059 br
= inst
.operands
[1].reg
;
9060 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf000))
9061 as_bad (_("bad register for mrs"));
9065 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9066 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9068 _("'APSR', 'CPSR' or 'SPSR' expected"));
9069 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9072 inst
.instruction
|= br
;
9075 /* Two possible forms:
9076 "{C|S}PSR_<field>, Rm",
9077 "{C|S}PSR_f, #expression". */
9082 if (do_vfp_nsyn_msr () == SUCCESS
)
9085 inst
.instruction
|= inst
.operands
[0].imm
;
9086 if (inst
.operands
[1].isreg
)
9087 inst
.instruction
|= inst
.operands
[1].reg
;
9090 inst
.instruction
|= INST_IMMEDIATE
;
9091 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
9092 inst
.reloc
.pc_rel
= 0;
9099 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9101 if (!inst
.operands
[2].present
)
9102 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9103 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9104 inst
.instruction
|= inst
.operands
[1].reg
;
9105 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9107 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9108 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9109 as_tsktsk (_("Rd and Rm should be different in mul"));
9112 /* Long Multiply Parser
9113 UMULL RdLo, RdHi, Rm, Rs
9114 SMULL RdLo, RdHi, Rm, Rs
9115 UMLAL RdLo, RdHi, Rm, Rs
9116 SMLAL RdLo, RdHi, Rm, Rs. */
9121 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9122 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9123 inst
.instruction
|= inst
.operands
[2].reg
;
9124 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9126 /* rdhi and rdlo must be different. */
9127 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9128 as_tsktsk (_("rdhi and rdlo must be different"));
9130 /* rdhi, rdlo and rm must all be different before armv6. */
9131 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9132 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9133 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9134 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9140 if (inst
.operands
[0].present
9141 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9143 /* Architectural NOP hints are CPSR sets with no bits selected. */
9144 inst
.instruction
&= 0xf0000000;
9145 inst
.instruction
|= 0x0320f000;
9146 if (inst
.operands
[0].present
)
9147 inst
.instruction
|= inst
.operands
[0].imm
;
9151 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9152 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9153 Condition defaults to COND_ALWAYS.
9154 Error if Rd, Rn or Rm are R15. */
9159 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9160 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9161 inst
.instruction
|= inst
.operands
[2].reg
;
9162 if (inst
.operands
[3].present
)
9163 encode_arm_shift (3);
9166 /* ARM V6 PKHTB (Argument Parse). */
9171 if (!inst
.operands
[3].present
)
9173 /* If the shift specifier is omitted, turn the instruction
9174 into pkhbt rd, rm, rn. */
9175 inst
.instruction
&= 0xfff00010;
9176 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9177 inst
.instruction
|= inst
.operands
[1].reg
;
9178 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9182 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9183 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9184 inst
.instruction
|= inst
.operands
[2].reg
;
9185 encode_arm_shift (3);
9189 /* ARMv5TE: Preload-Cache
9190 MP Extensions: Preload for write
9194 Syntactically, like LDR with B=1, W=0, L=1. */
9199 constraint (!inst
.operands
[0].isreg
,
9200 _("'[' expected after PLD mnemonic"));
9201 constraint (inst
.operands
[0].postind
,
9202 _("post-indexed expression used in preload instruction"));
9203 constraint (inst
.operands
[0].writeback
,
9204 _("writeback used in preload instruction"));
9205 constraint (!inst
.operands
[0].preind
,
9206 _("unindexed addressing used in preload instruction"));
9207 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9210 /* ARMv7: PLI <addr_mode> */
9214 constraint (!inst
.operands
[0].isreg
,
9215 _("'[' expected after PLI mnemonic"));
9216 constraint (inst
.operands
[0].postind
,
9217 _("post-indexed expression used in preload instruction"));
9218 constraint (inst
.operands
[0].writeback
,
9219 _("writeback used in preload instruction"));
9220 constraint (!inst
.operands
[0].preind
,
9221 _("unindexed addressing used in preload instruction"));
9222 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9223 inst
.instruction
&= ~PRE_INDEX
;
9229 constraint (inst
.operands
[0].writeback
,
9230 _("push/pop do not support {reglist}^"));
9231 inst
.operands
[1] = inst
.operands
[0];
9232 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9233 inst
.operands
[0].isreg
= 1;
9234 inst
.operands
[0].writeback
= 1;
9235 inst
.operands
[0].reg
= REG_SP
;
9236 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9239 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9240 word at the specified address and the following word
9242 Unconditionally executed.
9243 Error if Rn is R15. */
9248 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9249 if (inst
.operands
[0].writeback
)
9250 inst
.instruction
|= WRITE_BACK
;
9253 /* ARM V6 ssat (argument parse). */
9258 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9259 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9260 inst
.instruction
|= inst
.operands
[2].reg
;
9262 if (inst
.operands
[3].present
)
9263 encode_arm_shift (3);
9266 /* ARM V6 usat (argument parse). */
9271 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9272 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9273 inst
.instruction
|= inst
.operands
[2].reg
;
9275 if (inst
.operands
[3].present
)
9276 encode_arm_shift (3);
9279 /* ARM V6 ssat16 (argument parse). */
9284 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9285 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9286 inst
.instruction
|= inst
.operands
[2].reg
;
9292 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9293 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9294 inst
.instruction
|= inst
.operands
[2].reg
;
9297 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9298 preserving the other bits.
9300 setend <endian_specifier>, where <endian_specifier> is either
9306 if (warn_on_deprecated
9307 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9308 as_tsktsk (_("setend use is deprecated for ARMv8"));
9310 if (inst
.operands
[0].imm
)
9311 inst
.instruction
|= 0x200;
9317 unsigned int Rm
= (inst
.operands
[1].present
9318 ? inst
.operands
[1].reg
9319 : inst
.operands
[0].reg
);
9321 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9322 inst
.instruction
|= Rm
;
9323 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9325 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9326 inst
.instruction
|= SHIFT_BY_REG
;
9327 /* PR 12854: Error on extraneous shifts. */
9328 constraint (inst
.operands
[2].shifted
,
9329 _("extraneous shift as part of operand to shift insn"));
9332 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
9338 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
9339 inst
.reloc
.pc_rel
= 0;
9345 inst
.reloc
.type
= BFD_RELOC_ARM_HVC
;
9346 inst
.reloc
.pc_rel
= 0;
9352 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9353 inst
.reloc
.pc_rel
= 0;
9359 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9360 _("selected processor does not support SETPAN instruction"));
9362 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9368 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9369 _("selected processor does not support SETPAN instruction"));
9371 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9374 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9375 SMLAxy{cond} Rd,Rm,Rs,Rn
9376 SMLAWy{cond} Rd,Rm,Rs,Rn
9377 Error if any register is R15. */
9382 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9383 inst
.instruction
|= inst
.operands
[1].reg
;
9384 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9385 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9388 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9389 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9390 Error if any register is R15.
9391 Warning if Rdlo == Rdhi. */
9396 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9397 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9398 inst
.instruction
|= inst
.operands
[2].reg
;
9399 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9401 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9402 as_tsktsk (_("rdhi and rdlo must be different"));
9405 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9406 SMULxy{cond} Rd,Rm,Rs
9407 Error if any register is R15. */
9412 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9413 inst
.instruction
|= inst
.operands
[1].reg
;
9414 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9417 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9418 the same for both ARM and Thumb-2. */
9425 if (inst
.operands
[0].present
)
9427 reg
= inst
.operands
[0].reg
;
9428 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9433 inst
.instruction
|= reg
<< 16;
9434 inst
.instruction
|= inst
.operands
[1].imm
;
9435 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9436 inst
.instruction
|= WRITE_BACK
;
9439 /* ARM V6 strex (argument parse). */
9444 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9445 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9446 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9447 || inst
.operands
[2].negative
9448 /* See comment in do_ldrex(). */
9449 || (inst
.operands
[2].reg
== REG_PC
),
9452 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9453 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9455 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9456 || inst
.reloc
.exp
.X_add_number
!= 0,
9457 _("offset must be zero in ARM encoding"));
9459 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9460 inst
.instruction
|= inst
.operands
[1].reg
;
9461 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9462 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9468 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9469 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9470 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9471 || inst
.operands
[2].negative
,
9474 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9475 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9483 constraint (inst
.operands
[1].reg
% 2 != 0,
9484 _("even register required"));
9485 constraint (inst
.operands
[2].present
9486 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9487 _("can only store two consecutive registers"));
9488 /* If op 2 were present and equal to PC, this function wouldn't
9489 have been called in the first place. */
9490 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9492 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9493 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9494 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9497 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9498 inst
.instruction
|= inst
.operands
[1].reg
;
9499 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9506 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9507 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9515 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9516 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9521 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9522 extends it to 32-bits, and adds the result to a value in another
9523 register. You can specify a rotation by 0, 8, 16, or 24 bits
9524 before extracting the 16-bit value.
9525 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9526 Condition defaults to COND_ALWAYS.
9527 Error if any register uses R15. */
9532 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9533 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9534 inst
.instruction
|= inst
.operands
[2].reg
;
9535 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9540 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9541 Condition defaults to COND_ALWAYS.
9542 Error if any register uses R15. */
9547 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9548 inst
.instruction
|= inst
.operands
[1].reg
;
9549 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9552 /* VFP instructions. In a logical order: SP variant first, monad
9553 before dyad, arithmetic then move then load/store. */
9556 do_vfp_sp_monadic (void)
9558 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9559 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9563 do_vfp_sp_dyadic (void)
9565 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9566 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9567 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9571 do_vfp_sp_compare_z (void)
9573 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9577 do_vfp_dp_sp_cvt (void)
9579 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9580 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9584 do_vfp_sp_dp_cvt (void)
9586 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9587 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9591 do_vfp_reg_from_sp (void)
9593 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9594 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9598 do_vfp_reg2_from_sp2 (void)
9600 constraint (inst
.operands
[2].imm
!= 2,
9601 _("only two consecutive VFP SP registers allowed here"));
9602 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9603 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9604 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9608 do_vfp_sp_from_reg (void)
9610 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9611 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9615 do_vfp_sp2_from_reg2 (void)
9617 constraint (inst
.operands
[0].imm
!= 2,
9618 _("only two consecutive VFP SP registers allowed here"));
9619 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9620 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9621 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9625 do_vfp_sp_ldst (void)
9627 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9628 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9632 do_vfp_dp_ldst (void)
9634 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9635 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9640 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
9642 if (inst
.operands
[0].writeback
)
9643 inst
.instruction
|= WRITE_BACK
;
9645 constraint (ldstm_type
!= VFP_LDSTMIA
,
9646 _("this addressing mode requires base-register writeback"));
9647 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9648 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
9649 inst
.instruction
|= inst
.operands
[1].imm
;
9653 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
9657 if (inst
.operands
[0].writeback
)
9658 inst
.instruction
|= WRITE_BACK
;
9660 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
9661 _("this addressing mode requires base-register writeback"));
9663 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9664 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9666 count
= inst
.operands
[1].imm
<< 1;
9667 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
9670 inst
.instruction
|= count
;
9674 do_vfp_sp_ldstmia (void)
9676 vfp_sp_ldstm (VFP_LDSTMIA
);
9680 do_vfp_sp_ldstmdb (void)
9682 vfp_sp_ldstm (VFP_LDSTMDB
);
9686 do_vfp_dp_ldstmia (void)
9688 vfp_dp_ldstm (VFP_LDSTMIA
);
9692 do_vfp_dp_ldstmdb (void)
9694 vfp_dp_ldstm (VFP_LDSTMDB
);
9698 do_vfp_xp_ldstmia (void)
9700 vfp_dp_ldstm (VFP_LDSTMIAX
);
9704 do_vfp_xp_ldstmdb (void)
9706 vfp_dp_ldstm (VFP_LDSTMDBX
);
9710 do_vfp_dp_rd_rm (void)
9712 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9713 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9717 do_vfp_dp_rn_rd (void)
9719 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
9720 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9724 do_vfp_dp_rd_rn (void)
9726 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9727 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9731 do_vfp_dp_rd_rn_rm (void)
9733 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9734 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9735 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
9741 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9745 do_vfp_dp_rm_rd_rn (void)
9747 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
9748 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9749 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
9752 /* VFPv3 instructions. */
9754 do_vfp_sp_const (void)
9756 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9757 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9758 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9762 do_vfp_dp_const (void)
9764 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9765 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9766 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9770 vfp_conv (int srcsize
)
9772 int immbits
= srcsize
- inst
.operands
[1].imm
;
9774 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
9776 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9777 i.e. immbits must be in range 0 - 16. */
9778 inst
.error
= _("immediate value out of range, expected range [0, 16]");
9781 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
9783 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9784 i.e. immbits must be in range 0 - 31. */
9785 inst
.error
= _("immediate value out of range, expected range [1, 32]");
9789 inst
.instruction
|= (immbits
& 1) << 5;
9790 inst
.instruction
|= (immbits
>> 1);
9794 do_vfp_sp_conv_16 (void)
9796 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9801 do_vfp_dp_conv_16 (void)
9803 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9808 do_vfp_sp_conv_32 (void)
9810 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9815 do_vfp_dp_conv_32 (void)
9817 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9821 /* FPA instructions. Also in a logical order. */
9826 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9827 inst
.instruction
|= inst
.operands
[1].reg
;
9831 do_fpa_ldmstm (void)
9833 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9834 switch (inst
.operands
[1].imm
)
9836 case 1: inst
.instruction
|= CP_T_X
; break;
9837 case 2: inst
.instruction
|= CP_T_Y
; break;
9838 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
9843 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
9845 /* The instruction specified "ea" or "fd", so we can only accept
9846 [Rn]{!}. The instruction does not really support stacking or
9847 unstacking, so we have to emulate these by setting appropriate
9848 bits and offsets. */
9849 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9850 || inst
.reloc
.exp
.X_add_number
!= 0,
9851 _("this instruction does not support indexing"));
9853 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
9854 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
9856 if (!(inst
.instruction
& INDEX_UP
))
9857 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
9859 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
9861 inst
.operands
[2].preind
= 0;
9862 inst
.operands
[2].postind
= 1;
9866 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9869 /* iWMMXt instructions: strictly in alphabetical order. */
9872 do_iwmmxt_tandorc (void)
9874 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
9878 do_iwmmxt_textrc (void)
9880 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9881 inst
.instruction
|= inst
.operands
[1].imm
;
9885 do_iwmmxt_textrm (void)
9887 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9888 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9889 inst
.instruction
|= inst
.operands
[2].imm
;
9893 do_iwmmxt_tinsr (void)
9895 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9896 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9897 inst
.instruction
|= inst
.operands
[2].imm
;
9901 do_iwmmxt_tmia (void)
9903 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
9904 inst
.instruction
|= inst
.operands
[1].reg
;
9905 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9909 do_iwmmxt_waligni (void)
9911 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9912 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9913 inst
.instruction
|= inst
.operands
[2].reg
;
9914 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
9918 do_iwmmxt_wmerge (void)
9920 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9921 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9922 inst
.instruction
|= inst
.operands
[2].reg
;
9923 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
9927 do_iwmmxt_wmov (void)
9929 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9930 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9931 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9932 inst
.instruction
|= inst
.operands
[1].reg
;
9936 do_iwmmxt_wldstbh (void)
9939 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9941 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
9943 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
9944 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
9948 do_iwmmxt_wldstw (void)
9950 /* RIWR_RIWC clears .isreg for a control register. */
9951 if (!inst
.operands
[0].isreg
)
9953 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
9954 inst
.instruction
|= 0xf0000000;
9957 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9958 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
9962 do_iwmmxt_wldstd (void)
9964 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9965 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
9966 && inst
.operands
[1].immisreg
)
9968 inst
.instruction
&= ~0x1a000ff;
9969 inst
.instruction
|= (0xfU
<< 28);
9970 if (inst
.operands
[1].preind
)
9971 inst
.instruction
|= PRE_INDEX
;
9972 if (!inst
.operands
[1].negative
)
9973 inst
.instruction
|= INDEX_UP
;
9974 if (inst
.operands
[1].writeback
)
9975 inst
.instruction
|= WRITE_BACK
;
9976 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9977 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
9978 inst
.instruction
|= inst
.operands
[1].imm
;
9981 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
9985 do_iwmmxt_wshufh (void)
9987 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9988 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9989 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
9990 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
9994 do_iwmmxt_wzero (void)
9996 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9997 inst
.instruction
|= inst
.operands
[0].reg
;
9998 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9999 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10003 do_iwmmxt_wrwrwr_or_imm5 (void)
10005 if (inst
.operands
[2].isreg
)
10008 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10009 _("immediate operand requires iWMMXt2"));
10011 if (inst
.operands
[2].imm
== 0)
10013 switch ((inst
.instruction
>> 20) & 0xf)
10019 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10020 inst
.operands
[2].imm
= 16;
10021 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10027 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10028 inst
.operands
[2].imm
= 32;
10029 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10036 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10038 wrn
= (inst
.instruction
>> 16) & 0xf;
10039 inst
.instruction
&= 0xff0fff0f;
10040 inst
.instruction
|= wrn
;
10041 /* Bail out here; the instruction is now assembled. */
10046 /* Map 32 -> 0, etc. */
10047 inst
.operands
[2].imm
&= 0x1f;
10048 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10052 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10053 operations first, then control, shift, and load/store. */
10055 /* Insns like "foo X,Y,Z". */
10058 do_mav_triple (void)
10060 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10061 inst
.instruction
|= inst
.operands
[1].reg
;
10062 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10065 /* Insns like "foo W,X,Y,Z".
10066 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10071 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10072 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10073 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10074 inst
.instruction
|= inst
.operands
[3].reg
;
10077 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10079 do_mav_dspsc (void)
10081 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10084 /* Maverick shift immediate instructions.
10085 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10086 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10089 do_mav_shift (void)
10091 int imm
= inst
.operands
[2].imm
;
10093 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10094 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10096 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10097 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10098 Bit 4 should be 0. */
10099 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10101 inst
.instruction
|= imm
;
10104 /* XScale instructions. Also sorted arithmetic before move. */
10106 /* Xscale multiply-accumulate (argument parse)
10109 MIAxycc acc0,Rm,Rs. */
10114 inst
.instruction
|= inst
.operands
[1].reg
;
10115 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10118 /* Xscale move-accumulator-register (argument parse)
10120 MARcc acc0,RdLo,RdHi. */
10125 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10126 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10129 /* Xscale move-register-accumulator (argument parse)
10131 MRAcc RdLo,RdHi,acc0. */
10136 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10137 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10138 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10141 /* Encoding functions relevant only to Thumb. */
10143 /* inst.operands[i] is a shifted-register operand; encode
10144 it into inst.instruction in the format used by Thumb32. */
10147 encode_thumb32_shifted_operand (int i
)
10149 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10150 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10152 constraint (inst
.operands
[i
].immisreg
,
10153 _("shift by register not allowed in thumb mode"));
10154 inst
.instruction
|= inst
.operands
[i
].reg
;
10155 if (shift
== SHIFT_RRX
)
10156 inst
.instruction
|= SHIFT_ROR
<< 4;
10159 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10160 _("expression too complex"));
10162 constraint (value
> 32
10163 || (value
== 32 && (shift
== SHIFT_LSL
10164 || shift
== SHIFT_ROR
)),
10165 _("shift expression is too large"));
10169 else if (value
== 32)
10172 inst
.instruction
|= shift
<< 4;
10173 inst
.instruction
|= (value
& 0x1c) << 10;
10174 inst
.instruction
|= (value
& 0x03) << 6;
10179 /* inst.operands[i] was set up by parse_address. Encode it into a
10180 Thumb32 format load or store instruction. Reject forms that cannot
10181 be used with such instructions. If is_t is true, reject forms that
10182 cannot be used with a T instruction; if is_d is true, reject forms
10183 that cannot be used with a D instruction. If it is a store insn,
10184 reject PC in Rn. */
10187 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10189 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10191 constraint (!inst
.operands
[i
].isreg
,
10192 _("Instruction does not support =N addresses"));
10194 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10195 if (inst
.operands
[i
].immisreg
)
10197 constraint (is_pc
, BAD_PC_ADDRESSING
);
10198 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10199 constraint (inst
.operands
[i
].negative
,
10200 _("Thumb does not support negative register indexing"));
10201 constraint (inst
.operands
[i
].postind
,
10202 _("Thumb does not support register post-indexing"));
10203 constraint (inst
.operands
[i
].writeback
,
10204 _("Thumb does not support register indexing with writeback"));
10205 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10206 _("Thumb supports only LSL in shifted register indexing"));
10208 inst
.instruction
|= inst
.operands
[i
].imm
;
10209 if (inst
.operands
[i
].shifted
)
10211 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10212 _("expression too complex"));
10213 constraint (inst
.reloc
.exp
.X_add_number
< 0
10214 || inst
.reloc
.exp
.X_add_number
> 3,
10215 _("shift out of range"));
10216 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10218 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10220 else if (inst
.operands
[i
].preind
)
10222 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10223 constraint (is_t
&& inst
.operands
[i
].writeback
,
10224 _("cannot use writeback with this instruction"));
10225 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10226 BAD_PC_ADDRESSING
);
10230 inst
.instruction
|= 0x01000000;
10231 if (inst
.operands
[i
].writeback
)
10232 inst
.instruction
|= 0x00200000;
10236 inst
.instruction
|= 0x00000c00;
10237 if (inst
.operands
[i
].writeback
)
10238 inst
.instruction
|= 0x00000100;
10240 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10242 else if (inst
.operands
[i
].postind
)
10244 gas_assert (inst
.operands
[i
].writeback
);
10245 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10246 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10249 inst
.instruction
|= 0x00200000;
10251 inst
.instruction
|= 0x00000900;
10252 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10254 else /* unindexed - only for coprocessor */
10255 inst
.error
= _("instruction does not accept unindexed addressing");
10258 /* Table of Thumb instructions which exist in both 16- and 32-bit
10259 encodings (the latter only in post-V6T2 cores). The index is the
10260 value used in the insns table below. When there is more than one
10261 possible 16-bit encoding for the instruction, this table always
10263 Also contains several pseudo-instructions used during relaxation. */
10264 #define T16_32_TAB \
10265 X(_adc, 4140, eb400000), \
10266 X(_adcs, 4140, eb500000), \
10267 X(_add, 1c00, eb000000), \
10268 X(_adds, 1c00, eb100000), \
10269 X(_addi, 0000, f1000000), \
10270 X(_addis, 0000, f1100000), \
10271 X(_add_pc,000f, f20f0000), \
10272 X(_add_sp,000d, f10d0000), \
10273 X(_adr, 000f, f20f0000), \
10274 X(_and, 4000, ea000000), \
10275 X(_ands, 4000, ea100000), \
10276 X(_asr, 1000, fa40f000), \
10277 X(_asrs, 1000, fa50f000), \
10278 X(_b, e000, f000b000), \
10279 X(_bcond, d000, f0008000), \
10280 X(_bic, 4380, ea200000), \
10281 X(_bics, 4380, ea300000), \
10282 X(_cmn, 42c0, eb100f00), \
10283 X(_cmp, 2800, ebb00f00), \
10284 X(_cpsie, b660, f3af8400), \
10285 X(_cpsid, b670, f3af8600), \
10286 X(_cpy, 4600, ea4f0000), \
10287 X(_dec_sp,80dd, f1ad0d00), \
10288 X(_eor, 4040, ea800000), \
10289 X(_eors, 4040, ea900000), \
10290 X(_inc_sp,00dd, f10d0d00), \
10291 X(_ldmia, c800, e8900000), \
10292 X(_ldr, 6800, f8500000), \
10293 X(_ldrb, 7800, f8100000), \
10294 X(_ldrh, 8800, f8300000), \
10295 X(_ldrsb, 5600, f9100000), \
10296 X(_ldrsh, 5e00, f9300000), \
10297 X(_ldr_pc,4800, f85f0000), \
10298 X(_ldr_pc2,4800, f85f0000), \
10299 X(_ldr_sp,9800, f85d0000), \
10300 X(_lsl, 0000, fa00f000), \
10301 X(_lsls, 0000, fa10f000), \
10302 X(_lsr, 0800, fa20f000), \
10303 X(_lsrs, 0800, fa30f000), \
10304 X(_mov, 2000, ea4f0000), \
10305 X(_movs, 2000, ea5f0000), \
10306 X(_mul, 4340, fb00f000), \
10307 X(_muls, 4340, ffffffff), /* no 32b muls */ \
10308 X(_mvn, 43c0, ea6f0000), \
10309 X(_mvns, 43c0, ea7f0000), \
10310 X(_neg, 4240, f1c00000), /* rsb #0 */ \
10311 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
10312 X(_orr, 4300, ea400000), \
10313 X(_orrs, 4300, ea500000), \
10314 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
10315 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
10316 X(_rev, ba00, fa90f080), \
10317 X(_rev16, ba40, fa90f090), \
10318 X(_revsh, bac0, fa90f0b0), \
10319 X(_ror, 41c0, fa60f000), \
10320 X(_rors, 41c0, fa70f000), \
10321 X(_sbc, 4180, eb600000), \
10322 X(_sbcs, 4180, eb700000), \
10323 X(_stmia, c000, e8800000), \
10324 X(_str, 6000, f8400000), \
10325 X(_strb, 7000, f8000000), \
10326 X(_strh, 8000, f8200000), \
10327 X(_str_sp,9000, f84d0000), \
10328 X(_sub, 1e00, eba00000), \
10329 X(_subs, 1e00, ebb00000), \
10330 X(_subi, 8000, f1a00000), \
10331 X(_subis, 8000, f1b00000), \
10332 X(_sxtb, b240, fa4ff080), \
10333 X(_sxth, b200, fa0ff080), \
10334 X(_tst, 4200, ea100f00), \
10335 X(_uxtb, b2c0, fa5ff080), \
10336 X(_uxth, b280, fa1ff080), \
10337 X(_nop, bf00, f3af8000), \
10338 X(_yield, bf10, f3af8001), \
10339 X(_wfe, bf20, f3af8002), \
10340 X(_wfi, bf30, f3af8003), \
10341 X(_sev, bf40, f3af8004), \
10342 X(_sevl, bf50, f3af8005), \
10343 X(_udf, de00, f7f0a000)
10345 /* To catch errors in encoding functions, the codes are all offset by
10346 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
10347 as 16-bit instructions. */
10348 #define X(a,b,c) T_MNEM##a
10349 enum t16_32_codes
{ T16_32_OFFSET
= 0xF7FF, T16_32_TAB
};
10352 #define X(a,b,c) 0x##b
10353 static const unsigned short thumb_op16
[] = { T16_32_TAB
};
10354 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
10357 #define X(a,b,c) 0x##c
10358 static const unsigned int thumb_op32
[] = { T16_32_TAB
};
10359 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
10360 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
10364 /* Thumb instruction encoders, in alphabetical order. */
10366 /* ADDW or SUBW. */
10369 do_t_add_sub_w (void)
10373 Rd
= inst
.operands
[0].reg
;
10374 Rn
= inst
.operands
[1].reg
;
10376 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10377 is the SP-{plus,minus}-immediate form of the instruction. */
10379 constraint (Rd
== REG_PC
, BAD_PC
);
10381 reject_bad_reg (Rd
);
10383 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10384 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10387 /* Parse an add or subtract instruction. We get here with inst.instruction
10388 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10391 do_t_add_sub (void)
10395 Rd
= inst
.operands
[0].reg
;
10396 Rs
= (inst
.operands
[1].present
10397 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10398 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10401 set_it_insn_type_last ();
10403 if (unified_syntax
)
10406 bfd_boolean narrow
;
10409 flags
= (inst
.instruction
== T_MNEM_adds
10410 || inst
.instruction
== T_MNEM_subs
);
10412 narrow
= !in_it_block ();
10414 narrow
= in_it_block ();
10415 if (!inst
.operands
[2].isreg
)
10419 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10421 add
= (inst
.instruction
== T_MNEM_add
10422 || inst
.instruction
== T_MNEM_adds
);
10424 if (inst
.size_req
!= 4)
10426 /* Attempt to use a narrow opcode, with relaxation if
10428 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10429 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10430 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10431 opcode
= T_MNEM_add_sp
;
10432 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10433 opcode
= T_MNEM_add_pc
;
10434 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10437 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10439 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10443 inst
.instruction
= THUMB_OP16(opcode
);
10444 inst
.instruction
|= (Rd
<< 4) | Rs
;
10445 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10446 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
10447 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10448 if (inst
.size_req
!= 2)
10449 inst
.relax
= opcode
;
10452 constraint (inst
.size_req
== 2, BAD_HIREG
);
10454 if (inst
.size_req
== 4
10455 || (inst
.size_req
!= 2 && !opcode
))
10459 constraint (add
, BAD_PC
);
10460 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10461 _("only SUBS PC, LR, #const allowed"));
10462 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10463 _("expression too complex"));
10464 constraint (inst
.reloc
.exp
.X_add_number
< 0
10465 || inst
.reloc
.exp
.X_add_number
> 0xff,
10466 _("immediate value out of range"));
10467 inst
.instruction
= T2_SUBS_PC_LR
10468 | inst
.reloc
.exp
.X_add_number
;
10469 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10472 else if (Rs
== REG_PC
)
10474 /* Always use addw/subw. */
10475 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10476 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10480 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10481 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10484 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10486 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10488 inst
.instruction
|= Rd
<< 8;
10489 inst
.instruction
|= Rs
<< 16;
10494 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10495 unsigned int shift
= inst
.operands
[2].shift_kind
;
10497 Rn
= inst
.operands
[2].reg
;
10498 /* See if we can do this with a 16-bit instruction. */
10499 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10501 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10506 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10507 || inst
.instruction
== T_MNEM_add
)
10509 : T_OPCODE_SUB_R3
);
10510 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10514 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10516 /* Thumb-1 cores (except v6-M) require at least one high
10517 register in a narrow non flag setting add. */
10518 if (Rd
> 7 || Rn
> 7
10519 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10520 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10527 inst
.instruction
= T_OPCODE_ADD_HI
;
10528 inst
.instruction
|= (Rd
& 8) << 4;
10529 inst
.instruction
|= (Rd
& 7);
10530 inst
.instruction
|= Rn
<< 3;
10536 constraint (Rd
== REG_PC
, BAD_PC
);
10537 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10538 constraint (Rs
== REG_PC
, BAD_PC
);
10539 reject_bad_reg (Rn
);
10541 /* If we get here, it can't be done in 16 bits. */
10542 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10543 _("shift must be constant"));
10544 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10545 inst
.instruction
|= Rd
<< 8;
10546 inst
.instruction
|= Rs
<< 16;
10547 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10548 _("shift value over 3 not allowed in thumb mode"));
10549 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10550 _("only LSL shift allowed in thumb mode"));
10551 encode_thumb32_shifted_operand (2);
10556 constraint (inst
.instruction
== T_MNEM_adds
10557 || inst
.instruction
== T_MNEM_subs
,
10560 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10562 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10563 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10566 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10567 ? 0x0000 : 0x8000);
10568 inst
.instruction
|= (Rd
<< 4) | Rs
;
10569 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10573 Rn
= inst
.operands
[2].reg
;
10574 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10576 /* We now have Rd, Rs, and Rn set to registers. */
10577 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10579 /* Can't do this for SUB. */
10580 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10581 inst
.instruction
= T_OPCODE_ADD_HI
;
10582 inst
.instruction
|= (Rd
& 8) << 4;
10583 inst
.instruction
|= (Rd
& 7);
10585 inst
.instruction
|= Rn
<< 3;
10587 inst
.instruction
|= Rs
<< 3;
10589 constraint (1, _("dest must overlap one source register"));
10593 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10594 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
10595 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10605 Rd
= inst
.operands
[0].reg
;
10606 reject_bad_reg (Rd
);
10608 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
10610 /* Defer to section relaxation. */
10611 inst
.relax
= inst
.instruction
;
10612 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10613 inst
.instruction
|= Rd
<< 4;
10615 else if (unified_syntax
&& inst
.size_req
!= 2)
10617 /* Generate a 32-bit opcode. */
10618 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10619 inst
.instruction
|= Rd
<< 8;
10620 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
10621 inst
.reloc
.pc_rel
= 1;
10625 /* Generate a 16-bit opcode. */
10626 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10627 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10628 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
10629 inst
.reloc
.pc_rel
= 1;
10631 inst
.instruction
|= Rd
<< 4;
10635 /* Arithmetic instructions for which there is just one 16-bit
10636 instruction encoding, and it allows only two low registers.
10637 For maximal compatibility with ARM syntax, we allow three register
10638 operands even when Thumb-32 instructions are not available, as long
10639 as the first two are identical. For instance, both "sbc r0,r1" and
10640 "sbc r0,r0,r1" are allowed. */
10646 Rd
= inst
.operands
[0].reg
;
10647 Rs
= (inst
.operands
[1].present
10648 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10649 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10650 Rn
= inst
.operands
[2].reg
;
10652 reject_bad_reg (Rd
);
10653 reject_bad_reg (Rs
);
10654 if (inst
.operands
[2].isreg
)
10655 reject_bad_reg (Rn
);
10657 if (unified_syntax
)
10659 if (!inst
.operands
[2].isreg
)
10661 /* For an immediate, we always generate a 32-bit opcode;
10662 section relaxation will shrink it later if possible. */
10663 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10664 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10665 inst
.instruction
|= Rd
<< 8;
10666 inst
.instruction
|= Rs
<< 16;
10667 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10671 bfd_boolean narrow
;
10673 /* See if we can do this with a 16-bit instruction. */
10674 if (THUMB_SETS_FLAGS (inst
.instruction
))
10675 narrow
= !in_it_block ();
10677 narrow
= in_it_block ();
10679 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10681 if (inst
.operands
[2].shifted
)
10683 if (inst
.size_req
== 4)
10689 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10690 inst
.instruction
|= Rd
;
10691 inst
.instruction
|= Rn
<< 3;
10695 /* If we get here, it can't be done in 16 bits. */
10696 constraint (inst
.operands
[2].shifted
10697 && inst
.operands
[2].immisreg
,
10698 _("shift must be constant"));
10699 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10700 inst
.instruction
|= Rd
<< 8;
10701 inst
.instruction
|= Rs
<< 16;
10702 encode_thumb32_shifted_operand (2);
10707 /* On its face this is a lie - the instruction does set the
10708 flags. However, the only supported mnemonic in this mode
10709 says it doesn't. */
10710 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10712 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10713 _("unshifted register required"));
10714 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10715 constraint (Rd
!= Rs
,
10716 _("dest and source1 must be the same register"));
10718 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10719 inst
.instruction
|= Rd
;
10720 inst
.instruction
|= Rn
<< 3;
10724 /* Similarly, but for instructions where the arithmetic operation is
10725 commutative, so we can allow either of them to be different from
10726 the destination operand in a 16-bit instruction. For instance, all
10727 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10734 Rd
= inst
.operands
[0].reg
;
10735 Rs
= (inst
.operands
[1].present
10736 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10737 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10738 Rn
= inst
.operands
[2].reg
;
10740 reject_bad_reg (Rd
);
10741 reject_bad_reg (Rs
);
10742 if (inst
.operands
[2].isreg
)
10743 reject_bad_reg (Rn
);
10745 if (unified_syntax
)
10747 if (!inst
.operands
[2].isreg
)
10749 /* For an immediate, we always generate a 32-bit opcode;
10750 section relaxation will shrink it later if possible. */
10751 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10752 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10753 inst
.instruction
|= Rd
<< 8;
10754 inst
.instruction
|= Rs
<< 16;
10755 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10759 bfd_boolean narrow
;
10761 /* See if we can do this with a 16-bit instruction. */
10762 if (THUMB_SETS_FLAGS (inst
.instruction
))
10763 narrow
= !in_it_block ();
10765 narrow
= in_it_block ();
10767 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10769 if (inst
.operands
[2].shifted
)
10771 if (inst
.size_req
== 4)
10778 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10779 inst
.instruction
|= Rd
;
10780 inst
.instruction
|= Rn
<< 3;
10785 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10786 inst
.instruction
|= Rd
;
10787 inst
.instruction
|= Rs
<< 3;
10792 /* If we get here, it can't be done in 16 bits. */
10793 constraint (inst
.operands
[2].shifted
10794 && inst
.operands
[2].immisreg
,
10795 _("shift must be constant"));
10796 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10797 inst
.instruction
|= Rd
<< 8;
10798 inst
.instruction
|= Rs
<< 16;
10799 encode_thumb32_shifted_operand (2);
10804 /* On its face this is a lie - the instruction does set the
10805 flags. However, the only supported mnemonic in this mode
10806 says it doesn't. */
10807 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10809 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10810 _("unshifted register required"));
10811 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10813 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10814 inst
.instruction
|= Rd
;
10817 inst
.instruction
|= Rn
<< 3;
10819 inst
.instruction
|= Rs
<< 3;
10821 constraint (1, _("dest must overlap one source register"));
10829 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
10830 constraint (msb
> 32, _("bit-field extends past end of register"));
10831 /* The instruction encoding stores the LSB and MSB,
10832 not the LSB and width. */
10833 Rd
= inst
.operands
[0].reg
;
10834 reject_bad_reg (Rd
);
10835 inst
.instruction
|= Rd
<< 8;
10836 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
10837 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
10838 inst
.instruction
|= msb
- 1;
10847 Rd
= inst
.operands
[0].reg
;
10848 reject_bad_reg (Rd
);
10850 /* #0 in second position is alternative syntax for bfc, which is
10851 the same instruction but with REG_PC in the Rm field. */
10852 if (!inst
.operands
[1].isreg
)
10856 Rn
= inst
.operands
[1].reg
;
10857 reject_bad_reg (Rn
);
10860 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
10861 constraint (msb
> 32, _("bit-field extends past end of register"));
10862 /* The instruction encoding stores the LSB and MSB,
10863 not the LSB and width. */
10864 inst
.instruction
|= Rd
<< 8;
10865 inst
.instruction
|= Rn
<< 16;
10866 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10867 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10868 inst
.instruction
|= msb
- 1;
10876 Rd
= inst
.operands
[0].reg
;
10877 Rn
= inst
.operands
[1].reg
;
10879 reject_bad_reg (Rd
);
10880 reject_bad_reg (Rn
);
10882 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
10883 _("bit-field extends past end of register"));
10884 inst
.instruction
|= Rd
<< 8;
10885 inst
.instruction
|= Rn
<< 16;
10886 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10887 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10888 inst
.instruction
|= inst
.operands
[3].imm
- 1;
10891 /* ARM V5 Thumb BLX (argument parse)
10892 BLX <target_addr> which is BLX(1)
10893 BLX <Rm> which is BLX(2)
10894 Unfortunately, there are two different opcodes for this mnemonic.
10895 So, the insns[].value is not used, and the code here zaps values
10896 into inst.instruction.
10898 ??? How to take advantage of the additional two bits of displacement
10899 available in Thumb32 mode? Need new relocation? */
10904 set_it_insn_type_last ();
10906 if (inst
.operands
[0].isreg
)
10908 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
10909 /* We have a register, so this is BLX(2). */
10910 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
10914 /* No register. This must be BLX(1). */
10915 inst
.instruction
= 0xf000e800;
10916 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
10928 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
10930 if (in_it_block ())
10932 /* Conditional branches inside IT blocks are encoded as unconditional
10934 cond
= COND_ALWAYS
;
10939 if (cond
!= COND_ALWAYS
)
10940 opcode
= T_MNEM_bcond
;
10942 opcode
= inst
.instruction
;
10945 && (inst
.size_req
== 4
10946 || (inst
.size_req
!= 2
10947 && (inst
.operands
[0].hasreloc
10948 || inst
.reloc
.exp
.X_op
== O_constant
))))
10950 inst
.instruction
= THUMB_OP32(opcode
);
10951 if (cond
== COND_ALWAYS
)
10952 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
10955 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
10956 _("selected architecture does not support "
10957 "wide conditional branch instruction"));
10959 gas_assert (cond
!= 0xF);
10960 inst
.instruction
|= cond
<< 22;
10961 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
10966 inst
.instruction
= THUMB_OP16(opcode
);
10967 if (cond
== COND_ALWAYS
)
10968 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
10971 inst
.instruction
|= cond
<< 8;
10972 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
10974 /* Allow section relaxation. */
10975 if (unified_syntax
&& inst
.size_req
!= 2)
10976 inst
.relax
= opcode
;
10978 inst
.reloc
.type
= reloc
;
10979 inst
.reloc
.pc_rel
= 1;
10982 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10983 between the two is the maximum immediate allowed - which is passed in
10986 do_t_bkpt_hlt1 (int range
)
10988 constraint (inst
.cond
!= COND_ALWAYS
,
10989 _("instruction is always unconditional"));
10990 if (inst
.operands
[0].present
)
10992 constraint (inst
.operands
[0].imm
> range
,
10993 _("immediate value out of range"));
10994 inst
.instruction
|= inst
.operands
[0].imm
;
10997 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Thumb HLT: immediate limited to 63.  (Surrounding function lines lost
   in extraction; reconstructed per the canonical tc-arm.c.)  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
/* Thumb BKPT: immediate limited to 255.  (Surrounding function lines
   lost in extraction; reconstructed per the canonical tc-arm.c.)  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11013 do_t_branch23 (void)
11015 set_it_insn_type_last ();
11016 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11018 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11019 this file. We used to simply ignore the PLT reloc type here --
11020 the branch encoding is now needed to deal with TLSCALL relocs.
11021 So if we see a PLT reloc now, put it back to how it used to be to
11022 keep the preexisting behaviour. */
11023 if (inst
.reloc
.type
== BFD_RELOC_ARM_PLT32
)
11024 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11026 #if defined(OBJ_COFF)
11027 /* If the destination of the branch is a defined symbol which does not have
11028 the THUMB_FUNC attribute, then we must be calling a function which has
11029 the (interfacearm) attribute. We look for the Thumb entry point to that
11030 function and change the branch to refer to that function instead. */
11031 if ( inst
.reloc
.exp
.X_op
== O_symbol
11032 && inst
.reloc
.exp
.X_add_symbol
!= NULL
11033 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
11034 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
11035 inst
.reloc
.exp
.X_add_symbol
=
11036 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
11043 set_it_insn_type_last ();
11044 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11045 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11046 should cause the alignment to be checked once it is known. This is
11047 because BX PC only works if the instruction is word aligned. */
11055 set_it_insn_type_last ();
11056 Rm
= inst
.operands
[0].reg
;
11057 reject_bad_reg (Rm
);
11058 inst
.instruction
|= Rm
<< 16;
11067 Rd
= inst
.operands
[0].reg
;
11068 Rm
= inst
.operands
[1].reg
;
11070 reject_bad_reg (Rd
);
11071 reject_bad_reg (Rm
);
11073 inst
.instruction
|= Rd
<< 8;
11074 inst
.instruction
|= Rm
<< 16;
11075 inst
.instruction
|= Rm
;
11081 set_it_insn_type (OUTSIDE_IT_INSN
);
11082 inst
.instruction
|= inst
.operands
[0].imm
;
11088 set_it_insn_type (OUTSIDE_IT_INSN
);
11090 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11091 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11093 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11094 inst
.instruction
= 0xf3af8000;
11095 inst
.instruction
|= imod
<< 9;
11096 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11097 if (inst
.operands
[1].present
)
11098 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11102 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11103 && (inst
.operands
[0].imm
& 4),
11104 _("selected processor does not support 'A' form "
11105 "of this instruction"));
11106 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11107 _("Thumb does not support the 2-argument "
11108 "form of this instruction"));
11109 inst
.instruction
|= inst
.operands
[0].imm
;
11113 /* THUMB CPY instruction (argument parse). */
11118 if (inst
.size_req
== 4)
11120 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11121 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11122 inst
.instruction
|= inst
.operands
[1].reg
;
11126 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11127 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11128 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11135 set_it_insn_type (OUTSIDE_IT_INSN
);
11136 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11137 inst
.instruction
|= inst
.operands
[0].reg
;
11138 inst
.reloc
.pc_rel
= 1;
11139 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11145 inst
.instruction
|= inst
.operands
[0].imm
;
11151 unsigned Rd
, Rn
, Rm
;
11153 Rd
= inst
.operands
[0].reg
;
11154 Rn
= (inst
.operands
[1].present
11155 ? inst
.operands
[1].reg
: Rd
);
11156 Rm
= inst
.operands
[2].reg
;
11158 reject_bad_reg (Rd
);
11159 reject_bad_reg (Rn
);
11160 reject_bad_reg (Rm
);
11162 inst
.instruction
|= Rd
<< 8;
11163 inst
.instruction
|= Rn
<< 16;
11164 inst
.instruction
|= Rm
;
11170 if (unified_syntax
&& inst
.size_req
== 4)
11171 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11173 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11179 unsigned int cond
= inst
.operands
[0].imm
;
11181 set_it_insn_type (IT_INSN
);
11182 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11184 now_it
.warn_deprecated
= FALSE
;
11186 /* If the condition is a negative condition, invert the mask. */
11187 if ((cond
& 0x1) == 0x0)
11189 unsigned int mask
= inst
.instruction
& 0x000f;
11191 if ((mask
& 0x7) == 0)
11193 /* No conversion needed. */
11194 now_it
.block_length
= 1;
11196 else if ((mask
& 0x3) == 0)
11199 now_it
.block_length
= 2;
11201 else if ((mask
& 0x1) == 0)
11204 now_it
.block_length
= 3;
11209 now_it
.block_length
= 4;
11212 inst
.instruction
&= 0xfff0;
11213 inst
.instruction
|= mask
;
11216 inst
.instruction
|= cond
<< 4;
11219 /* Helper function used for both push/pop and ldm/stm. */
11221 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
11225 load
= (inst
.instruction
& (1 << 20)) != 0;
11227 if (mask
& (1 << 13))
11228 inst
.error
= _("SP not allowed in register list");
11230 if ((mask
& (1 << base
)) != 0
11232 inst
.error
= _("having the base register in the register list when "
11233 "using write back is UNPREDICTABLE");
11237 if (mask
& (1 << 15))
11239 if (mask
& (1 << 14))
11240 inst
.error
= _("LR and PC should not both be in register list");
11242 set_it_insn_type_last ();
11247 if (mask
& (1 << 15))
11248 inst
.error
= _("PC not allowed in register list");
11251 if ((mask
& (mask
- 1)) == 0)
11253 /* Single register transfers implemented as str/ldr. */
11256 if (inst
.instruction
& (1 << 23))
11257 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11259 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11263 if (inst
.instruction
& (1 << 23))
11264 inst
.instruction
= 0x00800000; /* ia -> [base] */
11266 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11269 inst
.instruction
|= 0xf8400000;
11271 inst
.instruction
|= 0x00100000;
11273 mask
= ffs (mask
) - 1;
11276 else if (writeback
)
11277 inst
.instruction
|= WRITE_BACK
;
11279 inst
.instruction
|= mask
;
11280 inst
.instruction
|= base
<< 16;
11286 /* This really doesn't seem worth it. */
11287 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
11288 _("expression too complex"));
11289 constraint (inst
.operands
[1].writeback
,
11290 _("Thumb load/store multiple does not support {reglist}^"));
11292 if (unified_syntax
)
11294 bfd_boolean narrow
;
11298 /* See if we can use a 16-bit instruction. */
11299 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11300 && inst
.size_req
!= 4
11301 && !(inst
.operands
[1].imm
& ~0xff))
11303 mask
= 1 << inst
.operands
[0].reg
;
11305 if (inst
.operands
[0].reg
<= 7)
11307 if (inst
.instruction
== T_MNEM_stmia
11308 ? inst
.operands
[0].writeback
11309 : (inst
.operands
[0].writeback
11310 == !(inst
.operands
[1].imm
& mask
)))
11312 if (inst
.instruction
== T_MNEM_stmia
11313 && (inst
.operands
[1].imm
& mask
)
11314 && (inst
.operands
[1].imm
& (mask
- 1)))
11315 as_warn (_("value stored for r%d is UNKNOWN"),
11316 inst
.operands
[0].reg
);
11318 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11319 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11320 inst
.instruction
|= inst
.operands
[1].imm
;
11323 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11325 /* This means 1 register in reg list one of 3 situations:
11326 1. Instruction is stmia, but without writeback.
11327 2. lmdia without writeback, but with Rn not in
11329 3. ldmia with writeback, but with Rn in reglist.
11330 Case 3 is UNPREDICTABLE behaviour, so we handle
11331 case 1 and 2 which can be converted into a 16-bit
11332 str or ldr. The SP cases are handled below. */
11333 unsigned long opcode
;
11334 /* First, record an error for Case 3. */
11335 if (inst
.operands
[1].imm
& mask
11336 && inst
.operands
[0].writeback
)
11338 _("having the base register in the register list when "
11339 "using write back is UNPREDICTABLE");
11341 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11343 inst
.instruction
= THUMB_OP16 (opcode
);
11344 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11345 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11349 else if (inst
.operands
[0] .reg
== REG_SP
)
11351 if (inst
.operands
[0].writeback
)
11354 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11355 ? T_MNEM_push
: T_MNEM_pop
);
11356 inst
.instruction
|= inst
.operands
[1].imm
;
11359 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11362 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11363 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11364 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11372 if (inst
.instruction
< 0xffff)
11373 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11375 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
11376 inst
.operands
[0].writeback
);
11381 constraint (inst
.operands
[0].reg
> 7
11382 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11383 constraint (inst
.instruction
!= T_MNEM_ldmia
11384 && inst
.instruction
!= T_MNEM_stmia
,
11385 _("Thumb-2 instruction only valid in unified syntax"));
11386 if (inst
.instruction
== T_MNEM_stmia
)
11388 if (!inst
.operands
[0].writeback
)
11389 as_warn (_("this instruction will write back the base register"));
11390 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11391 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11392 as_warn (_("value stored for r%d is UNKNOWN"),
11393 inst
.operands
[0].reg
);
11397 if (!inst
.operands
[0].writeback
11398 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11399 as_warn (_("this instruction will write back the base register"));
11400 else if (inst
.operands
[0].writeback
11401 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11402 as_warn (_("this instruction will not write back the base register"));
11405 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11406 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11407 inst
.instruction
|= inst
.operands
[1].imm
;
11414 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11415 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11416 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11417 || inst
.operands
[1].negative
,
11420 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11422 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11423 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11424 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11430 if (!inst
.operands
[1].present
)
11432 constraint (inst
.operands
[0].reg
== REG_LR
,
11433 _("r14 not allowed as first register "
11434 "when second register is omitted"));
11435 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11437 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11440 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11441 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11442 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11448 unsigned long opcode
;
11451 if (inst
.operands
[0].isreg
11452 && !inst
.operands
[0].preind
11453 && inst
.operands
[0].reg
== REG_PC
)
11454 set_it_insn_type_last ();
11456 opcode
= inst
.instruction
;
11457 if (unified_syntax
)
11459 if (!inst
.operands
[1].isreg
)
11461 if (opcode
<= 0xffff)
11462 inst
.instruction
= THUMB_OP32 (opcode
);
11463 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11466 if (inst
.operands
[1].isreg
11467 && !inst
.operands
[1].writeback
11468 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11469 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11470 && opcode
<= 0xffff
11471 && inst
.size_req
!= 4)
11473 /* Insn may have a 16-bit form. */
11474 Rn
= inst
.operands
[1].reg
;
11475 if (inst
.operands
[1].immisreg
)
11477 inst
.instruction
= THUMB_OP16 (opcode
);
11479 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11481 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11482 reject_bad_reg (inst
.operands
[1].imm
);
11484 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11485 && opcode
!= T_MNEM_ldrsb
)
11486 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11487 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11494 if (inst
.reloc
.pc_rel
)
11495 opcode
= T_MNEM_ldr_pc2
;
11497 opcode
= T_MNEM_ldr_pc
;
11501 if (opcode
== T_MNEM_ldr
)
11502 opcode
= T_MNEM_ldr_sp
;
11504 opcode
= T_MNEM_str_sp
;
11506 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11510 inst
.instruction
= inst
.operands
[0].reg
;
11511 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11513 inst
.instruction
|= THUMB_OP16 (opcode
);
11514 if (inst
.size_req
== 2)
11515 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11517 inst
.relax
= opcode
;
11521 /* Definitely a 32-bit variant. */
11523 /* Warning for Erratum 752419. */
11524 if (opcode
== T_MNEM_ldr
11525 && inst
.operands
[0].reg
== REG_SP
11526 && inst
.operands
[1].writeback
== 1
11527 && !inst
.operands
[1].immisreg
)
11529 if (no_cpu_selected ()
11530 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11531 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11532 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11533 as_warn (_("This instruction may be unpredictable "
11534 "if executed on M-profile cores "
11535 "with interrupts enabled."));
11538 /* Do some validations regarding addressing modes. */
11539 if (inst
.operands
[1].immisreg
)
11540 reject_bad_reg (inst
.operands
[1].imm
);
11542 constraint (inst
.operands
[1].writeback
== 1
11543 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11546 inst
.instruction
= THUMB_OP32 (opcode
);
11547 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11548 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11549 check_ldr_r15_aligned ();
11553 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11555 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11557 /* Only [Rn,Rm] is acceptable. */
11558 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11559 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11560 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11561 || inst
.operands
[1].negative
,
11562 _("Thumb does not support this addressing mode"));
11563 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11567 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11568 if (!inst
.operands
[1].isreg
)
11569 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11572 constraint (!inst
.operands
[1].preind
11573 || inst
.operands
[1].shifted
11574 || inst
.operands
[1].writeback
,
11575 _("Thumb does not support this addressing mode"));
11576 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
11578 constraint (inst
.instruction
& 0x0600,
11579 _("byte or halfword not valid for base register"));
11580 constraint (inst
.operands
[1].reg
== REG_PC
11581 && !(inst
.instruction
& THUMB_LOAD_BIT
),
11582 _("r15 based store not allowed"));
11583 constraint (inst
.operands
[1].immisreg
,
11584 _("invalid base register for register offset"));
11586 if (inst
.operands
[1].reg
== REG_PC
)
11587 inst
.instruction
= T_OPCODE_LDR_PC
;
11588 else if (inst
.instruction
& THUMB_LOAD_BIT
)
11589 inst
.instruction
= T_OPCODE_LDR_SP
;
11591 inst
.instruction
= T_OPCODE_STR_SP
;
11593 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11594 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11598 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
11599 if (!inst
.operands
[1].immisreg
)
11601 /* Immediate offset. */
11602 inst
.instruction
|= inst
.operands
[0].reg
;
11603 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11604 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11608 /* Register offset. */
11609 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
11610 constraint (inst
.operands
[1].negative
,
11611 _("Thumb does not support this addressing mode"));
11614 switch (inst
.instruction
)
11616 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
11617 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
11618 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
11619 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
11620 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
11621 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
11622 case 0x5600 /* ldrsb */:
11623 case 0x5e00 /* ldrsh */: break;
11627 inst
.instruction
|= inst
.operands
[0].reg
;
11628 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11629 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
11635 if (!inst
.operands
[1].present
)
11637 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11638 constraint (inst
.operands
[0].reg
== REG_LR
,
11639 _("r14 not allowed here"));
11640 constraint (inst
.operands
[0].reg
== REG_R12
,
11641 _("r12 not allowed here"));
11644 if (inst
.operands
[2].writeback
11645 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
11646 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
11647 as_warn (_("base register written back, and overlaps "
11648 "one of transfer registers"));
11650 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11651 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11652 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
11658 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11659 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
11665 unsigned Rd
, Rn
, Rm
, Ra
;
11667 Rd
= inst
.operands
[0].reg
;
11668 Rn
= inst
.operands
[1].reg
;
11669 Rm
= inst
.operands
[2].reg
;
11670 Ra
= inst
.operands
[3].reg
;
11672 reject_bad_reg (Rd
);
11673 reject_bad_reg (Rn
);
11674 reject_bad_reg (Rm
);
11675 reject_bad_reg (Ra
);
11677 inst
.instruction
|= Rd
<< 8;
11678 inst
.instruction
|= Rn
<< 16;
11679 inst
.instruction
|= Rm
;
11680 inst
.instruction
|= Ra
<< 12;
11686 unsigned RdLo
, RdHi
, Rn
, Rm
;
11688 RdLo
= inst
.operands
[0].reg
;
11689 RdHi
= inst
.operands
[1].reg
;
11690 Rn
= inst
.operands
[2].reg
;
11691 Rm
= inst
.operands
[3].reg
;
11693 reject_bad_reg (RdLo
);
11694 reject_bad_reg (RdHi
);
11695 reject_bad_reg (Rn
);
11696 reject_bad_reg (Rm
);
11698 inst
.instruction
|= RdLo
<< 12;
11699 inst
.instruction
|= RdHi
<< 8;
11700 inst
.instruction
|= Rn
<< 16;
11701 inst
.instruction
|= Rm
;
11705 do_t_mov_cmp (void)
11709 Rn
= inst
.operands
[0].reg
;
11710 Rm
= inst
.operands
[1].reg
;
11713 set_it_insn_type_last ();
11715 if (unified_syntax
)
11717 int r0off
= (inst
.instruction
== T_MNEM_mov
11718 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
11719 unsigned long opcode
;
11720 bfd_boolean narrow
;
11721 bfd_boolean low_regs
;
11723 low_regs
= (Rn
<= 7 && Rm
<= 7);
11724 opcode
= inst
.instruction
;
11725 if (in_it_block ())
11726 narrow
= opcode
!= T_MNEM_movs
;
11728 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
11729 if (inst
.size_req
== 4
11730 || inst
.operands
[1].shifted
)
11733 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11734 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
11735 && !inst
.operands
[1].shifted
11739 inst
.instruction
= T2_SUBS_PC_LR
;
11743 if (opcode
== T_MNEM_cmp
)
11745 constraint (Rn
== REG_PC
, BAD_PC
);
11748 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11750 warn_deprecated_sp (Rm
);
11751 /* R15 was documented as a valid choice for Rm in ARMv6,
11752 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11753 tools reject R15, so we do too. */
11754 constraint (Rm
== REG_PC
, BAD_PC
);
11757 reject_bad_reg (Rm
);
11759 else if (opcode
== T_MNEM_mov
11760 || opcode
== T_MNEM_movs
)
11762 if (inst
.operands
[1].isreg
)
11764 if (opcode
== T_MNEM_movs
)
11766 reject_bad_reg (Rn
);
11767 reject_bad_reg (Rm
);
11771 /* This is mov.n. */
11772 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
11773 && (Rm
== REG_SP
|| Rm
== REG_PC
))
11775 as_tsktsk (_("Use of r%u as a source register is "
11776 "deprecated when r%u is the destination "
11777 "register."), Rm
, Rn
);
11782 /* This is mov.w. */
11783 constraint (Rn
== REG_PC
, BAD_PC
);
11784 constraint (Rm
== REG_PC
, BAD_PC
);
11785 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
11789 reject_bad_reg (Rn
);
11792 if (!inst
.operands
[1].isreg
)
11794 /* Immediate operand. */
11795 if (!in_it_block () && opcode
== T_MNEM_mov
)
11797 if (low_regs
&& narrow
)
11799 inst
.instruction
= THUMB_OP16 (opcode
);
11800 inst
.instruction
|= Rn
<< 8;
11801 if (inst
.size_req
== 2)
11803 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11804 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
11805 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11808 inst
.relax
= opcode
;
11812 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11813 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11814 inst
.instruction
|= Rn
<< r0off
;
11815 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11818 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
11819 && (inst
.instruction
== T_MNEM_mov
11820 || inst
.instruction
== T_MNEM_movs
))
11822 /* Register shifts are encoded as separate shift instructions. */
11823 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
11825 if (in_it_block ())
11830 if (inst
.size_req
== 4)
11833 if (!low_regs
|| inst
.operands
[1].imm
> 7)
11839 switch (inst
.operands
[1].shift_kind
)
11842 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
11845 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
11848 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
11851 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
11857 inst
.instruction
= opcode
;
11860 inst
.instruction
|= Rn
;
11861 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
11866 inst
.instruction
|= CONDS_BIT
;
11868 inst
.instruction
|= Rn
<< 8;
11869 inst
.instruction
|= Rm
<< 16;
11870 inst
.instruction
|= inst
.operands
[1].imm
;
11875 /* Some mov with immediate shift have narrow variants.
11876 Register shifts are handled above. */
11877 if (low_regs
&& inst
.operands
[1].shifted
11878 && (inst
.instruction
== T_MNEM_mov
11879 || inst
.instruction
== T_MNEM_movs
))
11881 if (in_it_block ())
11882 narrow
= (inst
.instruction
== T_MNEM_mov
);
11884 narrow
= (inst
.instruction
== T_MNEM_movs
);
11889 switch (inst
.operands
[1].shift_kind
)
11891 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11892 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11893 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11894 default: narrow
= FALSE
; break;
11900 inst
.instruction
|= Rn
;
11901 inst
.instruction
|= Rm
<< 3;
11902 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11906 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11907 inst
.instruction
|= Rn
<< r0off
;
11908 encode_thumb32_shifted_operand (1);
11912 switch (inst
.instruction
)
11915 /* In v4t or v5t a move of two lowregs produces unpredictable
11916 results. Don't allow this. */
11919 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
11920 "MOV Rd, Rs with two low registers is not "
11921 "permitted on this architecture");
11922 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
11926 inst
.instruction
= T_OPCODE_MOV_HR
;
11927 inst
.instruction
|= (Rn
& 0x8) << 4;
11928 inst
.instruction
|= (Rn
& 0x7);
11929 inst
.instruction
|= Rm
<< 3;
11933 /* We know we have low registers at this point.
11934 Generate LSLS Rd, Rs, #0. */
11935 inst
.instruction
= T_OPCODE_LSL_I
;
11936 inst
.instruction
|= Rn
;
11937 inst
.instruction
|= Rm
<< 3;
11943 inst
.instruction
= T_OPCODE_CMP_LR
;
11944 inst
.instruction
|= Rn
;
11945 inst
.instruction
|= Rm
<< 3;
11949 inst
.instruction
= T_OPCODE_CMP_HR
;
11950 inst
.instruction
|= (Rn
& 0x8) << 4;
11951 inst
.instruction
|= (Rn
& 0x7);
11952 inst
.instruction
|= Rm
<< 3;
11959 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11961 /* PR 10443: Do not silently ignore shifted operands. */
11962 constraint (inst
.operands
[1].shifted
,
11963 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11965 if (inst
.operands
[1].isreg
)
11967 if (Rn
< 8 && Rm
< 8)
11969 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11970 since a MOV instruction produces unpredictable results. */
11971 if (inst
.instruction
== T_OPCODE_MOV_I8
)
11972 inst
.instruction
= T_OPCODE_ADD_I3
;
11974 inst
.instruction
= T_OPCODE_CMP_LR
;
11976 inst
.instruction
|= Rn
;
11977 inst
.instruction
|= Rm
<< 3;
11981 if (inst
.instruction
== T_OPCODE_MOV_I8
)
11982 inst
.instruction
= T_OPCODE_MOV_HR
;
11984 inst
.instruction
= T_OPCODE_CMP_HR
;
11990 constraint (Rn
> 7,
11991 _("only lo regs allowed with immediate"));
11992 inst
.instruction
|= Rn
<< 8;
11993 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
12004 top
= (inst
.instruction
& 0x00800000) != 0;
12005 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
12007 constraint (top
, _(":lower16: not allowed this instruction"));
12008 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
12010 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
12012 constraint (!top
, _(":upper16: not allowed this instruction"));
12013 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
12016 Rd
= inst
.operands
[0].reg
;
12017 reject_bad_reg (Rd
);
12019 inst
.instruction
|= Rd
<< 8;
12020 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
12022 imm
= inst
.reloc
.exp
.X_add_number
;
12023 inst
.instruction
|= (imm
& 0xf000) << 4;
12024 inst
.instruction
|= (imm
& 0x0800) << 15;
12025 inst
.instruction
|= (imm
& 0x0700) << 4;
12026 inst
.instruction
|= (imm
& 0x00ff);
12031 do_t_mvn_tst (void)
12035 Rn
= inst
.operands
[0].reg
;
12036 Rm
= inst
.operands
[1].reg
;
12038 if (inst
.instruction
== T_MNEM_cmp
12039 || inst
.instruction
== T_MNEM_cmn
)
12040 constraint (Rn
== REG_PC
, BAD_PC
);
12042 reject_bad_reg (Rn
);
12043 reject_bad_reg (Rm
);
12045 if (unified_syntax
)
12047 int r0off
= (inst
.instruction
== T_MNEM_mvn
12048 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12049 bfd_boolean narrow
;
12051 if (inst
.size_req
== 4
12052 || inst
.instruction
> 0xffff
12053 || inst
.operands
[1].shifted
12054 || Rn
> 7 || Rm
> 7)
12056 else if (inst
.instruction
== T_MNEM_cmn
12057 || inst
.instruction
== T_MNEM_tst
)
12059 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12060 narrow
= !in_it_block ();
12062 narrow
= in_it_block ();
12064 if (!inst
.operands
[1].isreg
)
12066 /* For an immediate, we always generate a 32-bit opcode;
12067 section relaxation will shrink it later if possible. */
12068 if (inst
.instruction
< 0xffff)
12069 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12070 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12071 inst
.instruction
|= Rn
<< r0off
;
12072 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12076 /* See if we can do this with a 16-bit instruction. */
12079 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12080 inst
.instruction
|= Rn
;
12081 inst
.instruction
|= Rm
<< 3;
12085 constraint (inst
.operands
[1].shifted
12086 && inst
.operands
[1].immisreg
,
12087 _("shift must be constant"));
12088 if (inst
.instruction
< 0xffff)
12089 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12090 inst
.instruction
|= Rn
<< r0off
;
12091 encode_thumb32_shifted_operand (1);
12097 constraint (inst
.instruction
> 0xffff
12098 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12099 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12100 _("unshifted register required"));
12101 constraint (Rn
> 7 || Rm
> 7,
12104 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12105 inst
.instruction
|= Rn
;
12106 inst
.instruction
|= Rm
<< 3;
12115 if (do_vfp_nsyn_mrs () == SUCCESS
)
12118 Rd
= inst
.operands
[0].reg
;
12119 reject_bad_reg (Rd
);
12120 inst
.instruction
|= Rd
<< 8;
12122 if (inst
.operands
[1].isreg
)
12124 unsigned br
= inst
.operands
[1].reg
;
12125 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12126 as_bad (_("bad register for mrs"));
12128 inst
.instruction
|= br
& (0xf << 16);
12129 inst
.instruction
|= (br
& 0x300) >> 4;
12130 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12134 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12136 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12138 /* PR gas/12698: The constraint is only applied for m_profile.
12139 If the user has specified -march=all, we want to ignore it as
12140 we are building for any CPU type, including non-m variants. */
12141 bfd_boolean m_profile
=
12142 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12143 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12144 "not support requested special purpose register"));
12147 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12149 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12150 _("'APSR', 'CPSR' or 'SPSR' expected"));
12152 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12153 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12154 inst
.instruction
|= 0xf0000;
12164 if (do_vfp_nsyn_msr () == SUCCESS
)
12167 constraint (!inst
.operands
[1].isreg
,
12168 _("Thumb encoding does not support an immediate here"));
12170 if (inst
.operands
[0].isreg
)
12171 flags
= (int)(inst
.operands
[0].reg
);
12173 flags
= inst
.operands
[0].imm
;
12175 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12177 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12179 /* PR gas/12698: The constraint is only applied for m_profile.
12180 If the user has specified -march=all, we want to ignore it as
12181 we are building for any CPU type, including non-m variants. */
12182 bfd_boolean m_profile
=
12183 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12184 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12185 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12186 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12187 && bits
!= PSR_f
)) && m_profile
,
12188 _("selected processor does not support requested special "
12189 "purpose register"));
12192 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12193 "requested special purpose register"));
12195 Rn
= inst
.operands
[1].reg
;
12196 reject_bad_reg (Rn
);
12198 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12199 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12200 inst
.instruction
|= (flags
& 0x300) >> 4;
12201 inst
.instruction
|= (flags
& 0xff);
12202 inst
.instruction
|= Rn
<< 16;
12208 bfd_boolean narrow
;
12209 unsigned Rd
, Rn
, Rm
;
12211 if (!inst
.operands
[2].present
)
12212 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12214 Rd
= inst
.operands
[0].reg
;
12215 Rn
= inst
.operands
[1].reg
;
12216 Rm
= inst
.operands
[2].reg
;
12218 if (unified_syntax
)
12220 if (inst
.size_req
== 4
12226 else if (inst
.instruction
== T_MNEM_muls
)
12227 narrow
= !in_it_block ();
12229 narrow
= in_it_block ();
12233 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12234 constraint (Rn
> 7 || Rm
> 7,
12241 /* 16-bit MULS/Conditional MUL. */
12242 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12243 inst
.instruction
|= Rd
;
12246 inst
.instruction
|= Rm
<< 3;
12248 inst
.instruction
|= Rn
<< 3;
12250 constraint (1, _("dest must overlap one source register"));
12254 constraint (inst
.instruction
!= T_MNEM_mul
,
12255 _("Thumb-2 MUL must not set flags"));
12257 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12258 inst
.instruction
|= Rd
<< 8;
12259 inst
.instruction
|= Rn
<< 16;
12260 inst
.instruction
|= Rm
<< 0;
12262 reject_bad_reg (Rd
);
12263 reject_bad_reg (Rn
);
12264 reject_bad_reg (Rm
);
12271 unsigned RdLo
, RdHi
, Rn
, Rm
;
12273 RdLo
= inst
.operands
[0].reg
;
12274 RdHi
= inst
.operands
[1].reg
;
12275 Rn
= inst
.operands
[2].reg
;
12276 Rm
= inst
.operands
[3].reg
;
12278 reject_bad_reg (RdLo
);
12279 reject_bad_reg (RdHi
);
12280 reject_bad_reg (Rn
);
12281 reject_bad_reg (Rm
);
12283 inst
.instruction
|= RdLo
<< 12;
12284 inst
.instruction
|= RdHi
<< 8;
12285 inst
.instruction
|= Rn
<< 16;
12286 inst
.instruction
|= Rm
;
12289 as_tsktsk (_("rdhi and rdlo must be different"));
12295 set_it_insn_type (NEUTRAL_IT_INSN
);
12297 if (unified_syntax
)
12299 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12301 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12302 inst
.instruction
|= inst
.operands
[0].imm
;
12306 /* PR9722: Check for Thumb2 availability before
12307 generating a thumb2 nop instruction. */
12308 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12310 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12311 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12314 inst
.instruction
= 0x46c0;
12319 constraint (inst
.operands
[0].present
,
12320 _("Thumb does not support NOP with hints"));
12321 inst
.instruction
= 0x46c0;
12328 if (unified_syntax
)
12330 bfd_boolean narrow
;
12332 if (THUMB_SETS_FLAGS (inst
.instruction
))
12333 narrow
= !in_it_block ();
12335 narrow
= in_it_block ();
12336 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12338 if (inst
.size_req
== 4)
12343 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12344 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12345 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12349 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12350 inst
.instruction
|= inst
.operands
[0].reg
;
12351 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12356 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12358 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12360 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12361 inst
.instruction
|= inst
.operands
[0].reg
;
12362 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12371 Rd
= inst
.operands
[0].reg
;
12372 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12374 reject_bad_reg (Rd
);
12375 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12376 reject_bad_reg (Rn
);
12378 inst
.instruction
|= Rd
<< 8;
12379 inst
.instruction
|= Rn
<< 16;
12381 if (!inst
.operands
[2].isreg
)
12383 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12384 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12390 Rm
= inst
.operands
[2].reg
;
12391 reject_bad_reg (Rm
);
12393 constraint (inst
.operands
[2].shifted
12394 && inst
.operands
[2].immisreg
,
12395 _("shift must be constant"));
12396 encode_thumb32_shifted_operand (2);
12403 unsigned Rd
, Rn
, Rm
;
12405 Rd
= inst
.operands
[0].reg
;
12406 Rn
= inst
.operands
[1].reg
;
12407 Rm
= inst
.operands
[2].reg
;
12409 reject_bad_reg (Rd
);
12410 reject_bad_reg (Rn
);
12411 reject_bad_reg (Rm
);
12413 inst
.instruction
|= Rd
<< 8;
12414 inst
.instruction
|= Rn
<< 16;
12415 inst
.instruction
|= Rm
;
12416 if (inst
.operands
[3].present
)
12418 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
12419 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12420 _("expression too complex"));
12421 inst
.instruction
|= (val
& 0x1c) << 10;
12422 inst
.instruction
|= (val
& 0x03) << 6;
12429 if (!inst
.operands
[3].present
)
12433 inst
.instruction
&= ~0x00000020;
12435 /* PR 10168. Swap the Rm and Rn registers. */
12436 Rtmp
= inst
.operands
[1].reg
;
12437 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12438 inst
.operands
[2].reg
= Rtmp
;
12446 if (inst
.operands
[0].immisreg
)
12447 reject_bad_reg (inst
.operands
[0].imm
);
12449 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12453 do_t_push_pop (void)
12457 constraint (inst
.operands
[0].writeback
,
12458 _("push/pop do not support {reglist}^"));
12459 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
12460 _("expression too complex"));
12462 mask
= inst
.operands
[0].imm
;
12463 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12464 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12465 else if (inst
.size_req
!= 4
12466 && (mask
& ~0xff) == (1 << (inst
.instruction
== T_MNEM_push
12467 ? REG_LR
: REG_PC
)))
12469 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12470 inst
.instruction
|= THUMB_PP_PC_LR
;
12471 inst
.instruction
|= mask
& 0xff;
12473 else if (unified_syntax
)
12475 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12476 encode_thumb2_ldmstm (13, mask
, TRUE
);
12480 inst
.error
= _("invalid register list to push/pop instruction");
12490 Rd
= inst
.operands
[0].reg
;
12491 Rm
= inst
.operands
[1].reg
;
12493 reject_bad_reg (Rd
);
12494 reject_bad_reg (Rm
);
12496 inst
.instruction
|= Rd
<< 8;
12497 inst
.instruction
|= Rm
<< 16;
12498 inst
.instruction
|= Rm
;
12506 Rd
= inst
.operands
[0].reg
;
12507 Rm
= inst
.operands
[1].reg
;
12509 reject_bad_reg (Rd
);
12510 reject_bad_reg (Rm
);
12512 if (Rd
<= 7 && Rm
<= 7
12513 && inst
.size_req
!= 4)
12515 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12516 inst
.instruction
|= Rd
;
12517 inst
.instruction
|= Rm
<< 3;
12519 else if (unified_syntax
)
12521 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12522 inst
.instruction
|= Rd
<< 8;
12523 inst
.instruction
|= Rm
<< 16;
12524 inst
.instruction
|= Rm
;
12527 inst
.error
= BAD_HIREG
;
12535 Rd
= inst
.operands
[0].reg
;
12536 Rm
= inst
.operands
[1].reg
;
12538 reject_bad_reg (Rd
);
12539 reject_bad_reg (Rm
);
12541 inst
.instruction
|= Rd
<< 8;
12542 inst
.instruction
|= Rm
;
12550 Rd
= inst
.operands
[0].reg
;
12551 Rs
= (inst
.operands
[1].present
12552 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12553 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12555 reject_bad_reg (Rd
);
12556 reject_bad_reg (Rs
);
12557 if (inst
.operands
[2].isreg
)
12558 reject_bad_reg (inst
.operands
[2].reg
);
12560 inst
.instruction
|= Rd
<< 8;
12561 inst
.instruction
|= Rs
<< 16;
12562 if (!inst
.operands
[2].isreg
)
12564 bfd_boolean narrow
;
12566 if ((inst
.instruction
& 0x00100000) != 0)
12567 narrow
= !in_it_block ();
12569 narrow
= in_it_block ();
12571 if (Rd
> 7 || Rs
> 7)
12574 if (inst
.size_req
== 4 || !unified_syntax
)
12577 if (inst
.reloc
.exp
.X_op
!= O_constant
12578 || inst
.reloc
.exp
.X_add_number
!= 0)
12581 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12582 relaxation, but it doesn't seem worth the hassle. */
12585 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12586 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12587 inst
.instruction
|= Rs
<< 3;
12588 inst
.instruction
|= Rd
;
12592 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12593 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12597 encode_thumb32_shifted_operand (2);
12603 if (warn_on_deprecated
12604 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12605 as_tsktsk (_("setend use is deprecated for ARMv8"));
12607 set_it_insn_type (OUTSIDE_IT_INSN
);
12608 if (inst
.operands
[0].imm
)
12609 inst
.instruction
|= 0x8;
12615 if (!inst
.operands
[1].present
)
12616 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12618 if (unified_syntax
)
12620 bfd_boolean narrow
;
12623 switch (inst
.instruction
)
12626 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12628 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12630 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12632 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12636 if (THUMB_SETS_FLAGS (inst
.instruction
))
12637 narrow
= !in_it_block ();
12639 narrow
= in_it_block ();
12640 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12642 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12644 if (inst
.operands
[2].isreg
12645 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12646 || inst
.operands
[2].reg
> 7))
12648 if (inst
.size_req
== 4)
12651 reject_bad_reg (inst
.operands
[0].reg
);
12652 reject_bad_reg (inst
.operands
[1].reg
);
12656 if (inst
.operands
[2].isreg
)
12658 reject_bad_reg (inst
.operands
[2].reg
);
12659 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12660 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12661 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12662 inst
.instruction
|= inst
.operands
[2].reg
;
12664 /* PR 12854: Error on extraneous shifts. */
12665 constraint (inst
.operands
[2].shifted
,
12666 _("extraneous shift as part of operand to shift insn"));
12670 inst
.operands
[1].shifted
= 1;
12671 inst
.operands
[1].shift_kind
= shift_kind
;
12672 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
12673 ? T_MNEM_movs
: T_MNEM_mov
);
12674 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12675 encode_thumb32_shifted_operand (1);
12676 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12677 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12682 if (inst
.operands
[2].isreg
)
12684 switch (shift_kind
)
12686 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12687 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12688 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12689 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12693 inst
.instruction
|= inst
.operands
[0].reg
;
12694 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12696 /* PR 12854: Error on extraneous shifts. */
12697 constraint (inst
.operands
[2].shifted
,
12698 _("extraneous shift as part of operand to shift insn"));
12702 switch (shift_kind
)
12704 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12705 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12706 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12709 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12710 inst
.instruction
|= inst
.operands
[0].reg
;
12711 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12717 constraint (inst
.operands
[0].reg
> 7
12718 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
12719 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12721 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
12723 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
12724 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12725 _("source1 and dest must be same register"));
12727 switch (inst
.instruction
)
12729 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12730 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12731 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12732 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12736 inst
.instruction
|= inst
.operands
[0].reg
;
12737 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12739 /* PR 12854: Error on extraneous shifts. */
12740 constraint (inst
.operands
[2].shifted
,
12741 _("extraneous shift as part of operand to shift insn"));
12745 switch (inst
.instruction
)
12747 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12748 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12749 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12750 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
12753 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12754 inst
.instruction
|= inst
.operands
[0].reg
;
12755 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12763 unsigned Rd
, Rn
, Rm
;
12765 Rd
= inst
.operands
[0].reg
;
12766 Rn
= inst
.operands
[1].reg
;
12767 Rm
= inst
.operands
[2].reg
;
12769 reject_bad_reg (Rd
);
12770 reject_bad_reg (Rn
);
12771 reject_bad_reg (Rm
);
12773 inst
.instruction
|= Rd
<< 8;
12774 inst
.instruction
|= Rn
<< 16;
12775 inst
.instruction
|= Rm
;
12781 unsigned Rd
, Rn
, Rm
;
12783 Rd
= inst
.operands
[0].reg
;
12784 Rm
= inst
.operands
[1].reg
;
12785 Rn
= inst
.operands
[2].reg
;
12787 reject_bad_reg (Rd
);
12788 reject_bad_reg (Rn
);
12789 reject_bad_reg (Rm
);
12791 inst
.instruction
|= Rd
<< 8;
12792 inst
.instruction
|= Rn
<< 16;
12793 inst
.instruction
|= Rm
;
12799 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12800 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
12801 _("SMC is not permitted on this architecture"));
12802 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12803 _("expression too complex"));
12804 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12805 inst
.instruction
|= (value
& 0xf000) >> 12;
12806 inst
.instruction
|= (value
& 0x0ff0);
12807 inst
.instruction
|= (value
& 0x000f) << 16;
12808 /* PR gas/15623: SMC instructions must be last in an IT block. */
12809 set_it_insn_type_last ();
12815 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12817 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12818 inst
.instruction
|= (value
& 0x0fff);
12819 inst
.instruction
|= (value
& 0xf000) << 4;
12823 do_t_ssat_usat (int bias
)
12827 Rd
= inst
.operands
[0].reg
;
12828 Rn
= inst
.operands
[2].reg
;
12830 reject_bad_reg (Rd
);
12831 reject_bad_reg (Rn
);
12833 inst
.instruction
|= Rd
<< 8;
12834 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
12835 inst
.instruction
|= Rn
<< 16;
12837 if (inst
.operands
[3].present
)
12839 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
12841 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12843 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12844 _("expression too complex"));
12846 if (shift_amount
!= 0)
12848 constraint (shift_amount
> 31,
12849 _("shift expression is too large"));
12851 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
12852 inst
.instruction
|= 0x00200000; /* sh bit. */
12854 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
12855 inst
.instruction
|= (shift_amount
& 0x03) << 6;
12863 do_t_ssat_usat (1);
12871 Rd
= inst
.operands
[0].reg
;
12872 Rn
= inst
.operands
[2].reg
;
12874 reject_bad_reg (Rd
);
12875 reject_bad_reg (Rn
);
12877 inst
.instruction
|= Rd
<< 8;
12878 inst
.instruction
|= inst
.operands
[1].imm
- 1;
12879 inst
.instruction
|= Rn
<< 16;
12885 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
12886 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
12887 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
12888 || inst
.operands
[2].negative
,
12891 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
12893 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12894 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12895 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12896 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
12902 if (!inst
.operands
[2].present
)
12903 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
12905 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
12906 || inst
.operands
[0].reg
== inst
.operands
[2].reg
12907 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
12910 inst
.instruction
|= inst
.operands
[0].reg
;
12911 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12912 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
12913 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
12919 unsigned Rd
, Rn
, Rm
;
12921 Rd
= inst
.operands
[0].reg
;
12922 Rn
= inst
.operands
[1].reg
;
12923 Rm
= inst
.operands
[2].reg
;
12925 reject_bad_reg (Rd
);
12926 reject_bad_reg (Rn
);
12927 reject_bad_reg (Rm
);
12929 inst
.instruction
|= Rd
<< 8;
12930 inst
.instruction
|= Rn
<< 16;
12931 inst
.instruction
|= Rm
;
12932 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
12940 Rd
= inst
.operands
[0].reg
;
12941 Rm
= inst
.operands
[1].reg
;
12943 reject_bad_reg (Rd
);
12944 reject_bad_reg (Rm
);
12946 if (inst
.instruction
<= 0xffff
12947 && inst
.size_req
!= 4
12948 && Rd
<= 7 && Rm
<= 7
12949 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
12951 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12952 inst
.instruction
|= Rd
;
12953 inst
.instruction
|= Rm
<< 3;
12955 else if (unified_syntax
)
12957 if (inst
.instruction
<= 0xffff)
12958 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12959 inst
.instruction
|= Rd
<< 8;
12960 inst
.instruction
|= Rm
;
12961 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
12965 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
12966 _("Thumb encoding does not support rotation"));
12967 constraint (1, BAD_HIREG
);
12974 /* We have to do the following check manually as ARM_EXT_OS only applies
12976 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6m
))
12978 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_os
)
12979 /* This only applies to the v6m howver, not later architectures. */
12980 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
))
12981 as_bad (_("SVC is not permitted on this architecture"));
12982 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, arm_ext_os
);
12985 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
12994 half
= (inst
.instruction
& 0x10) != 0;
12995 set_it_insn_type_last ();
12996 constraint (inst
.operands
[0].immisreg
,
12997 _("instruction requires register index"));
12999 Rn
= inst
.operands
[0].reg
;
13000 Rm
= inst
.operands
[0].imm
;
13002 constraint (Rn
== REG_SP
, BAD_SP
);
13003 reject_bad_reg (Rm
);
13005 constraint (!half
&& inst
.operands
[0].shifted
,
13006 _("instruction does not allow shifted index"));
13007 inst
.instruction
|= (Rn
<< 16) | Rm
;
13013 if (!inst
.operands
[0].present
)
13014 inst
.operands
[0].imm
= 0;
13016 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13018 constraint (inst
.size_req
== 2,
13019 _("immediate value out of range"));
13020 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13021 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13022 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13026 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13027 inst
.instruction
|= inst
.operands
[0].imm
;
13030 set_it_insn_type (NEUTRAL_IT_INSN
);
13037 do_t_ssat_usat (0);
13045 Rd
= inst
.operands
[0].reg
;
13046 Rn
= inst
.operands
[2].reg
;
13048 reject_bad_reg (Rd
);
13049 reject_bad_reg (Rn
);
13051 inst
.instruction
|= Rd
<< 8;
13052 inst
.instruction
|= inst
.operands
[1].imm
;
13053 inst
.instruction
|= Rn
<< 16;
13056 /* Neon instruction encoder helpers. */
13058 /* Encodings for the different types for various Neon opcodes. */
13060 /* An "invalid" code for the following tables. */
13063 struct neon_tab_entry
13066 unsigned float_or_poly
;
13067 unsigned scalar_or_imm
;
13070 /* Map overloaded Neon opcodes to their respective encodings. */
13071 #define NEON_ENC_TAB \
13072 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13073 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13074 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13075 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13076 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13077 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13078 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13079 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13080 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13081 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13082 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13083 /* Register variants of the following two instructions are encoded as
13084 vcge / vcgt with the operands reversed. */ \
13085 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13086 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13087 X(vfma, N_INV, 0x0000c10, N_INV), \
13088 X(vfms, N_INV, 0x0200c10, N_INV), \
13089 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13090 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13091 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13092 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13093 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13094 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13095 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13096 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13097 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13098 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13099 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13100 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13101 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13102 X(vshl, 0x0000400, N_INV, 0x0800510), \
13103 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13104 X(vand, 0x0000110, N_INV, 0x0800030), \
13105 X(vbic, 0x0100110, N_INV, 0x0800030), \
13106 X(veor, 0x1000110, N_INV, N_INV), \
13107 X(vorn, 0x0300110, N_INV, 0x0800010), \
13108 X(vorr, 0x0200110, N_INV, 0x0800010), \
13109 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13110 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13111 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13112 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13113 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13114 X(vst1, 0x0000000, 0x0800000, N_INV), \
13115 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13116 X(vst2, 0x0000100, 0x0800100, N_INV), \
13117 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13118 X(vst3, 0x0000200, 0x0800200, N_INV), \
13119 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13120 X(vst4, 0x0000300, 0x0800300, N_INV), \
13121 X(vmovn, 0x1b20200, N_INV, N_INV), \
13122 X(vtrn, 0x1b20080, N_INV, N_INV), \
13123 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13124 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13125 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13126 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13127 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13128 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13129 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13130 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13131 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13132 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13133 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13134 X(vseleq, 0xe000a00, N_INV, N_INV), \
13135 X(vselvs, 0xe100a00, N_INV, N_INV), \
13136 X(vselge, 0xe200a00, N_INV, N_INV), \
13137 X(vselgt, 0xe300a00, N_INV, N_INV), \
13138 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13139 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13140 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13141 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13142 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13143 X(aes, 0x3b00300, N_INV, N_INV), \
13144 X(sha3op, 0x2000c00, N_INV, N_INV), \
13145 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13146 X(sha2op, 0x3ba0380, N_INV, N_INV)
13150 #define X(OPC,I,F,S) N_MNEM_##OPC
13155 static const struct neon_tab_entry neon_enc_tab
[] =
13157 #define X(OPC,I,F,S) { (I), (F), (S) }
13162 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13163 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13164 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13165 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13166 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13167 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13168 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13169 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13170 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13171 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13172 #define NEON_ENC_SINGLE_(X) \
13173 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13174 #define NEON_ENC_DOUBLE_(X) \
13175 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13176 #define NEON_ENC_FPV8_(X) \
13177 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13179 #define NEON_ENCODE(type, inst) \
13182 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13183 inst.is_neon = 1; \
13187 #define check_neon_suffixes \
13190 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13192 as_bad (_("invalid neon suffix for non neon instruction")); \
13198 /* Define shapes for instruction operands. The following mnemonic characters
13199 are used in this table:
13201 F - VFP S<n> register
13202 D - Neon D<n> register
13203 Q - Neon Q<n> register
13207 L - D<n> register list
13209 This table is used to generate various data:
13210 - enumerations of the form NS_DDR to be used as arguments to
13212 - a table classifying shapes into single, double, quad, mixed.
13213 - a table used to drive neon_select_shape. */
13215 #define NEON_SHAPE_DEF \
13216 X(3, (D, D, D), DOUBLE), \
13217 X(3, (Q, Q, Q), QUAD), \
13218 X(3, (D, D, I), DOUBLE), \
13219 X(3, (Q, Q, I), QUAD), \
13220 X(3, (D, D, S), DOUBLE), \
13221 X(3, (Q, Q, S), QUAD), \
13222 X(2, (D, D), DOUBLE), \
13223 X(2, (Q, Q), QUAD), \
13224 X(2, (D, S), DOUBLE), \
13225 X(2, (Q, S), QUAD), \
13226 X(2, (D, R), DOUBLE), \
13227 X(2, (Q, R), QUAD), \
13228 X(2, (D, I), DOUBLE), \
13229 X(2, (Q, I), QUAD), \
13230 X(3, (D, L, D), DOUBLE), \
13231 X(2, (D, Q), MIXED), \
13232 X(2, (Q, D), MIXED), \
13233 X(3, (D, Q, I), MIXED), \
13234 X(3, (Q, D, I), MIXED), \
13235 X(3, (Q, D, D), MIXED), \
13236 X(3, (D, Q, Q), MIXED), \
13237 X(3, (Q, Q, D), MIXED), \
13238 X(3, (Q, D, S), MIXED), \
13239 X(3, (D, Q, S), MIXED), \
13240 X(4, (D, D, D, I), DOUBLE), \
13241 X(4, (Q, Q, Q, I), QUAD), \
13242 X(2, (F, F), SINGLE), \
13243 X(3, (F, F, F), SINGLE), \
13244 X(2, (F, I), SINGLE), \
13245 X(2, (F, D), MIXED), \
13246 X(2, (D, F), MIXED), \
13247 X(3, (F, F, I), MIXED), \
13248 X(4, (R, R, F, F), SINGLE), \
13249 X(4, (F, F, R, R), SINGLE), \
13250 X(3, (D, R, R), DOUBLE), \
13251 X(3, (R, R, D), DOUBLE), \
13252 X(2, (S, R), SINGLE), \
13253 X(2, (R, S), SINGLE), \
13254 X(2, (F, R), SINGLE), \
13255 X(2, (R, F), SINGLE), \
13256 /* Half float shape supported so far. */\
13257 X (2, (H, D), MIXED), \
13258 X (2, (D, H), MIXED), \
13259 X (2, (H, F), MIXED), \
13260 X (2, (F, H), MIXED), \
13261 X (2, (H, H), HALF), \
13262 X (2, (H, R), HALF), \
13263 X (2, (R, H), HALF), \
13264 X (2, (H, I), HALF), \
13265 X (3, (H, H, H), HALF), \
13266 X (3, (H, F, I), MIXED), \
13267 X (3, (F, H, I), MIXED)
13269 #define S2(A,B) NS_##A##B
13270 #define S3(A,B,C) NS_##A##B##C
13271 #define S4(A,B,C,D) NS_##A##B##C##D
13273 #define X(N, L, C) S##N L
13286 enum neon_shape_class
13295 #define X(N, L, C) SC_##C
13297 static enum neon_shape_class neon_shape_class
[] =
13316 /* Register widths of above. */
13317 static unsigned neon_shape_el_size
[] =
13329 struct neon_shape_info
13332 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13335 #define S2(A,B) { SE_##A, SE_##B }
13336 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13337 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13339 #define X(N, L, C) { N, S##N L }
13341 static struct neon_shape_info neon_shape_tab
[] =
13351 /* Bit masks used in type checking given instructions.
13352 'N_EQK' means the type must be the same as (or based on in some way) the key
13353 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13354 set, various other bits can be set as well in order to modify the meaning of
13355 the type constraint. */
13357 enum neon_type_mask
13381 N_KEY
= 0x1000000, /* Key element (main type specifier). */
13382 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
13383 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
13384 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
13385 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
13386 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
13387 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13388 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13389 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13390 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
13391 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13393 N_MAX_NONSPECIAL
= N_P64
13396 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13398 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13399 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13400 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13401 #define N_SUF_32 (N_SU_32 | N_F32)
13402 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13403 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
13404 #define N_F_ALL (N_F16 | N_F32 | N_F64)
13406 /* Pass this as the first type argument to neon_check_type to ignore types
13408 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13410 /* Select a "shape" for the current instruction (describing register types or
13411 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13412 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13413 function of operand parsing, so this function doesn't need to be called.
13414 Shapes should be listed in order of decreasing length. */
13416 static enum neon_shape
13417 neon_select_shape (enum neon_shape shape
, ...)
13420 enum neon_shape first_shape
= shape
;
13422 /* Fix missing optional operands. FIXME: we don't know at this point how
13423 many arguments we should have, so this makes the assumption that we have
13424 > 1. This is true of all current Neon opcodes, I think, but may not be
13425 true in the future. */
13426 if (!inst
.operands
[1].present
)
13427 inst
.operands
[1] = inst
.operands
[0];
13429 va_start (ap
, shape
);
13431 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13436 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13438 if (!inst
.operands
[j
].present
)
13444 switch (neon_shape_tab
[shape
].el
[j
])
13446 /* If a .f16, .16, .u16, .s16 type specifier is given over
13447 a VFP single precision register operand, it's essentially
13448 means only half of the register is used.
13450 If the type specifier is given after the mnemonics, the
13451 information is stored in inst.vectype. If the type specifier
13452 is given after register operand, the information is stored
13453 in inst.operands[].vectype.
13455 When there is only one type specifier, and all the register
13456 operands are the same type of hardware register, the type
13457 specifier applies to all register operands.
13459 If no type specifier is given, the shape is inferred from
13460 operand information.
13463 vadd.f16 s0, s1, s2: NS_HHH
13464 vabs.f16 s0, s1: NS_HH
13465 vmov.f16 s0, r1: NS_HR
13466 vmov.f16 r0, s1: NS_RH
13467 vcvt.f16 r0, s1: NS_RH
13468 vcvt.f16.s32 s2, s2, #29: NS_HFI
13469 vcvt.f16.s32 s2, s2: NS_HF
13472 if (!(inst
.operands
[j
].isreg
13473 && inst
.operands
[j
].isvec
13474 && inst
.operands
[j
].issingle
13475 && !inst
.operands
[j
].isquad
13476 && ((inst
.vectype
.elems
== 1
13477 && inst
.vectype
.el
[0].size
== 16)
13478 || (inst
.vectype
.elems
> 1
13479 && inst
.vectype
.el
[j
].size
== 16)
13480 || (inst
.vectype
.elems
== 0
13481 && inst
.operands
[j
].vectype
.type
!= NT_invtype
13482 && inst
.operands
[j
].vectype
.size
== 16))))
13487 if (!(inst
.operands
[j
].isreg
13488 && inst
.operands
[j
].isvec
13489 && inst
.operands
[j
].issingle
13490 && !inst
.operands
[j
].isquad
13491 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
13492 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
13493 || (inst
.vectype
.elems
== 0
13494 && (inst
.operands
[j
].vectype
.size
== 32
13495 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
13500 if (!(inst
.operands
[j
].isreg
13501 && inst
.operands
[j
].isvec
13502 && !inst
.operands
[j
].isquad
13503 && !inst
.operands
[j
].issingle
))
13508 if (!(inst
.operands
[j
].isreg
13509 && !inst
.operands
[j
].isvec
))
13514 if (!(inst
.operands
[j
].isreg
13515 && inst
.operands
[j
].isvec
13516 && inst
.operands
[j
].isquad
13517 && !inst
.operands
[j
].issingle
))
13522 if (!(!inst
.operands
[j
].isreg
13523 && !inst
.operands
[j
].isscalar
))
13528 if (!(!inst
.operands
[j
].isreg
13529 && inst
.operands
[j
].isscalar
))
13539 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
13540 /* We've matched all the entries in the shape table, and we don't
13541 have any left over operands which have not been matched. */
13547 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
13548 first_error (_("invalid instruction shape"));
13553 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13554 means the Q bit should be set). */
13557 neon_quad (enum neon_shape shape
)
13559 return neon_shape_class
[shape
] == SC_QUAD
;
13563 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
13566 /* Allow modification to be made to types which are constrained to be
13567 based on the key element, based on bits set alongside N_EQK. */
13568 if ((typebits
& N_EQK
) != 0)
13570 if ((typebits
& N_HLF
) != 0)
13572 else if ((typebits
& N_DBL
) != 0)
13574 if ((typebits
& N_SGN
) != 0)
13575 *g_type
= NT_signed
;
13576 else if ((typebits
& N_UNS
) != 0)
13577 *g_type
= NT_unsigned
;
13578 else if ((typebits
& N_INT
) != 0)
13579 *g_type
= NT_integer
;
13580 else if ((typebits
& N_FLT
) != 0)
13581 *g_type
= NT_float
;
13582 else if ((typebits
& N_SIZ
) != 0)
13583 *g_type
= NT_untyped
;
13587 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13588 operand type, i.e. the single type specified in a Neon instruction when it
13589 is the only one given. */
13591 static struct neon_type_el
13592 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
13594 struct neon_type_el dest
= *key
;
13596 gas_assert ((thisarg
& N_EQK
) != 0);
13598 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
13603 /* Convert Neon type and size into compact bitmask representation. */
13605 static enum neon_type_mask
13606 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
13613 case 8: return N_8
;
13614 case 16: return N_16
;
13615 case 32: return N_32
;
13616 case 64: return N_64
;
13624 case 8: return N_I8
;
13625 case 16: return N_I16
;
13626 case 32: return N_I32
;
13627 case 64: return N_I64
;
13635 case 16: return N_F16
;
13636 case 32: return N_F32
;
13637 case 64: return N_F64
;
13645 case 8: return N_P8
;
13646 case 16: return N_P16
;
13647 case 64: return N_P64
;
13655 case 8: return N_S8
;
13656 case 16: return N_S16
;
13657 case 32: return N_S32
;
13658 case 64: return N_S64
;
13666 case 8: return N_U8
;
13667 case 16: return N_U16
;
13668 case 32: return N_U32
;
13669 case 64: return N_U64
;
13680 /* Convert compact Neon bitmask type representation to a type and size. Only
13681 handles the case where a single bit is set in the mask. */
13684 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
13685 enum neon_type_mask mask
)
13687 if ((mask
& N_EQK
) != 0)
13690 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
13692 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
13694 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
13696 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
13701 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
13703 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
13704 *type
= NT_unsigned
;
13705 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
13706 *type
= NT_integer
;
13707 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
13708 *type
= NT_untyped
;
13709 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
13711 else if ((mask
& (N_F_ALL
)) != 0)
13719 /* Modify a bitmask of allowed types. This is only needed for type
13723 modify_types_allowed (unsigned allowed
, unsigned mods
)
13726 enum neon_el_type type
;
13732 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
13734 if (el_type_of_type_chk (&type
, &size
,
13735 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
13737 neon_modify_type_size (mods
, &type
, &size
);
13738 destmask
|= type_chk_of_el_type (type
, size
);
13745 /* Check type and return type classification.
13746 The manual states (paraphrase): If one datatype is given, it indicates the
13748 - the second operand, if there is one
13749 - the operand, if there is no second operand
13750 - the result, if there are no operands.
13751 This isn't quite good enough though, so we use a concept of a "key" datatype
13752 which is set on a per-instruction basis, which is the one which matters when
13753 only one data type is written.
13754 Note: this function has side-effects (e.g. filling in missing operands). All
13755 Neon instructions should call it before performing bit encoding. */
13757 static struct neon_type_el
13758 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
13761 unsigned i
, pass
, key_el
= 0;
13762 unsigned types
[NEON_MAX_TYPE_ELS
];
13763 enum neon_el_type k_type
= NT_invtype
;
13764 unsigned k_size
= -1u;
13765 struct neon_type_el badtype
= {NT_invtype
, -1};
13766 unsigned key_allowed
= 0;
13768 /* Optional registers in Neon instructions are always (not) in operand 1.
13769 Fill in the missing operand here, if it was omitted. */
13770 if (els
> 1 && !inst
.operands
[1].present
)
13771 inst
.operands
[1] = inst
.operands
[0];
13773 /* Suck up all the varargs. */
13775 for (i
= 0; i
< els
; i
++)
13777 unsigned thisarg
= va_arg (ap
, unsigned);
13778 if (thisarg
== N_IGNORE_TYPE
)
13783 types
[i
] = thisarg
;
13784 if ((thisarg
& N_KEY
) != 0)
13789 if (inst
.vectype
.elems
> 0)
13790 for (i
= 0; i
< els
; i
++)
13791 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
13793 first_error (_("types specified in both the mnemonic and operands"));
13797 /* Duplicate inst.vectype elements here as necessary.
13798 FIXME: No idea if this is exactly the same as the ARM assembler,
13799 particularly when an insn takes one register and one non-register
13801 if (inst
.vectype
.elems
== 1 && els
> 1)
13804 inst
.vectype
.elems
= els
;
13805 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
13806 for (j
= 0; j
< els
; j
++)
13808 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13811 else if (inst
.vectype
.elems
== 0 && els
> 0)
13814 /* No types were given after the mnemonic, so look for types specified
13815 after each operand. We allow some flexibility here; as long as the
13816 "key" operand has a type, we can infer the others. */
13817 for (j
= 0; j
< els
; j
++)
13818 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
13819 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
13821 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
13823 for (j
= 0; j
< els
; j
++)
13824 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
13825 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13830 first_error (_("operand types can't be inferred"));
13834 else if (inst
.vectype
.elems
!= els
)
13836 first_error (_("type specifier has the wrong number of parts"));
13840 for (pass
= 0; pass
< 2; pass
++)
13842 for (i
= 0; i
< els
; i
++)
13844 unsigned thisarg
= types
[i
];
13845 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
13846 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
13847 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
13848 unsigned g_size
= inst
.vectype
.el
[i
].size
;
13850 /* Decay more-specific signed & unsigned types to sign-insensitive
13851 integer types if sign-specific variants are unavailable. */
13852 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
13853 && (types_allowed
& N_SU_ALL
) == 0)
13854 g_type
= NT_integer
;
13856 /* If only untyped args are allowed, decay any more specific types to
13857 them. Some instructions only care about signs for some element
13858 sizes, so handle that properly. */
13859 if (((types_allowed
& N_UNT
) == 0)
13860 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
13861 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
13862 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
13863 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
13864 g_type
= NT_untyped
;
13868 if ((thisarg
& N_KEY
) != 0)
13872 key_allowed
= thisarg
& ~N_KEY
;
13877 if ((thisarg
& N_VFP
) != 0)
13879 enum neon_shape_el regshape
;
13880 unsigned regwidth
, match
;
13882 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
13885 first_error (_("invalid instruction shape"));
13888 regshape
= neon_shape_tab
[ns
].el
[i
];
13889 regwidth
= neon_shape_el_size
[regshape
];
13891 /* In VFP mode, operands must match register widths. If we
13892 have a key operand, use its width, else use the width of
13893 the current operand. */
13899 if (regwidth
!= match
)
13901 first_error (_("operand size must match register width"));
13906 if ((thisarg
& N_EQK
) == 0)
13908 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
13910 if ((given_type
& types_allowed
) == 0)
13912 first_error (_("bad type in Neon instruction"));
13918 enum neon_el_type mod_k_type
= k_type
;
13919 unsigned mod_k_size
= k_size
;
13920 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
13921 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
13923 first_error (_("inconsistent types in Neon instruction"));
13931 return inst
.vectype
.el
[key_el
];
13934 /* Neon-style VFP instruction forwarding. */
13936 /* Thumb VFP instructions have 0xE in the condition field. */
13939 do_vfp_cond_or_thumb (void)
13944 inst
.instruction
|= 0xe0000000;
13946 inst
.instruction
|= inst
.cond
<< 28;
13949 /* Look up and encode a simple mnemonic, for use as a helper function for the
13950 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13951 etc. It is assumed that operand parsing has already been done, and that the
13952 operands are in the form expected by the given opcode (this isn't necessarily
13953 the same as the form in which they were parsed, hence some massaging must
13954 take place before this function is called).
13955 Checks current arch version against that in the looked-up opcode. */
13958 do_vfp_nsyn_opcode (const char *opname
)
13960 const struct asm_opcode
*opcode
;
13962 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
13967 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
13968 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
13975 inst
.instruction
= opcode
->tvalue
;
13976 opcode
->tencode ();
13980 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
13981 opcode
->aencode ();
13986 do_vfp_nsyn_add_sub (enum neon_shape rs
)
13988 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
13993 do_vfp_nsyn_opcode ("fadds");
13995 do_vfp_nsyn_opcode ("fsubs");
14000 do_vfp_nsyn_opcode ("faddd");
14002 do_vfp_nsyn_opcode ("fsubd");
14006 /* Check operand types to see if this is a VFP instruction, and if so call
14010 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14012 enum neon_shape rs
;
14013 struct neon_type_el et
;
14018 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
14019 et
= neon_check_type (2, rs
,
14020 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
14024 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
14025 et
= neon_check_type (3, rs
,
14026 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
14033 if (et
.type
!= NT_invtype
)
14044 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14046 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14051 do_vfp_nsyn_opcode ("fmacs");
14053 do_vfp_nsyn_opcode ("fnmacs");
14058 do_vfp_nsyn_opcode ("fmacd");
14060 do_vfp_nsyn_opcode ("fnmacd");
14065 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14067 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14072 do_vfp_nsyn_opcode ("ffmas");
14074 do_vfp_nsyn_opcode ("ffnmas");
14079 do_vfp_nsyn_opcode ("ffmad");
14081 do_vfp_nsyn_opcode ("ffnmad");
14086 do_vfp_nsyn_mul (enum neon_shape rs
)
14089 do_vfp_nsyn_opcode ("fmuls");
14091 do_vfp_nsyn_opcode ("fmuld");
14095 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14097 int is_neg
= (inst
.instruction
& 0x80) != 0;
14098 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
14103 do_vfp_nsyn_opcode ("fnegs");
14105 do_vfp_nsyn_opcode ("fabss");
14110 do_vfp_nsyn_opcode ("fnegd");
14112 do_vfp_nsyn_opcode ("fabsd");
14116 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14117 insns belong to Neon, and are handled elsewhere. */
14120 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14122 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14126 do_vfp_nsyn_opcode ("fldmdbs");
14128 do_vfp_nsyn_opcode ("fldmias");
14133 do_vfp_nsyn_opcode ("fstmdbs");
14135 do_vfp_nsyn_opcode ("fstmias");
14140 do_vfp_nsyn_sqrt (void)
14142 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
14143 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
14146 do_vfp_nsyn_opcode ("fsqrts");
14148 do_vfp_nsyn_opcode ("fsqrtd");
14152 do_vfp_nsyn_div (void)
14154 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
14155 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14156 N_F32
| N_F64
| N_KEY
| N_VFP
);
14159 do_vfp_nsyn_opcode ("fdivs");
14161 do_vfp_nsyn_opcode ("fdivd");
14165 do_vfp_nsyn_nmul (void)
14167 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
14168 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14169 N_F32
| N_F64
| N_KEY
| N_VFP
);
14173 NEON_ENCODE (SINGLE
, inst
);
14174 do_vfp_sp_dyadic ();
14178 NEON_ENCODE (DOUBLE
, inst
);
14179 do_vfp_dp_rd_rn_rm ();
14181 do_vfp_cond_or_thumb ();
14185 do_vfp_nsyn_cmp (void)
14187 if (inst
.operands
[1].isreg
)
14189 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
14190 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
14194 NEON_ENCODE (SINGLE
, inst
);
14195 do_vfp_sp_monadic ();
14199 NEON_ENCODE (DOUBLE
, inst
);
14200 do_vfp_dp_rd_rm ();
14205 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
14206 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
14208 switch (inst
.instruction
& 0x0fffffff)
14211 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14214 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14222 NEON_ENCODE (SINGLE
, inst
);
14223 do_vfp_sp_compare_z ();
14227 NEON_ENCODE (DOUBLE
, inst
);
14231 do_vfp_cond_or_thumb ();
14235 nsyn_insert_sp (void)
14237 inst
.operands
[1] = inst
.operands
[0];
14238 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14239 inst
.operands
[0].reg
= REG_SP
;
14240 inst
.operands
[0].isreg
= 1;
14241 inst
.operands
[0].writeback
= 1;
14242 inst
.operands
[0].present
= 1;
14246 do_vfp_nsyn_push (void)
14249 if (inst
.operands
[1].issingle
)
14250 do_vfp_nsyn_opcode ("fstmdbs");
14252 do_vfp_nsyn_opcode ("fstmdbd");
14256 do_vfp_nsyn_pop (void)
14259 if (inst
.operands
[1].issingle
)
14260 do_vfp_nsyn_opcode ("fldmias");
14262 do_vfp_nsyn_opcode ("fldmiad");
14265 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14266 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14269 neon_dp_fixup (struct arm_it
* insn
)
14271 unsigned int i
= insn
->instruction
;
14276 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14287 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3 respectively).  Relies on ffs() returning the 1-based
   position of the lowest set bit.  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
14299 #define LOW4(R) ((R) & 0xf)
14300 #define HI1(R) (((R) >> 4) & 1)
14302 /* Encode insns with bit pattern:
14304 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14305 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14307 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14308 different meaning for some instruction. */
14311 neon_three_same (int isquad
, int ubit
, int size
)
14313 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14314 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14315 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14316 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14317 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14318 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14319 inst
.instruction
|= (isquad
!= 0) << 6;
14320 inst
.instruction
|= (ubit
!= 0) << 24;
14322 inst
.instruction
|= neon_logbits (size
) << 20;
14324 neon_dp_fixup (&inst
);
14327 /* Encode instructions of the form:
14329 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14330 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14332 Don't write size if SIZE == -1. */
14335 neon_two_same (int qbit
, int ubit
, int size
)
14337 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14338 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14339 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14340 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14341 inst
.instruction
|= (qbit
!= 0) << 6;
14342 inst
.instruction
|= (ubit
!= 0) << 24;
14345 inst
.instruction
|= neon_logbits (size
) << 18;
14347 neon_dp_fixup (&inst
);
14350 /* Neon instruction encoders, in approximate order of appearance. */
14353 do_neon_dyadic_i_su (void)
14355 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14356 struct neon_type_el et
= neon_check_type (3, rs
,
14357 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14358 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14362 do_neon_dyadic_i64_su (void)
14364 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14365 struct neon_type_el et
= neon_check_type (3, rs
,
14366 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14367 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14371 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14374 unsigned size
= et
.size
>> 3;
14375 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14376 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14377 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14378 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14379 inst
.instruction
|= (isquad
!= 0) << 6;
14380 inst
.instruction
|= immbits
<< 16;
14381 inst
.instruction
|= (size
>> 3) << 7;
14382 inst
.instruction
|= (size
& 0x7) << 19;
14384 inst
.instruction
|= (uval
!= 0) << 24;
14386 neon_dp_fixup (&inst
);
14390 do_neon_shl_imm (void)
14392 if (!inst
.operands
[2].isreg
)
14394 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14395 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14396 int imm
= inst
.operands
[2].imm
;
14398 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14399 _("immediate out of range for shift"));
14400 NEON_ENCODE (IMMED
, inst
);
14401 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14405 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14406 struct neon_type_el et
= neon_check_type (3, rs
,
14407 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14410 /* VSHL/VQSHL 3-register variants have syntax such as:
14412 whereas other 3-register operations encoded by neon_three_same have
14415 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14417 tmp
= inst
.operands
[2].reg
;
14418 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14419 inst
.operands
[1].reg
= tmp
;
14420 NEON_ENCODE (INTEGER
, inst
);
14421 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14426 do_neon_qshl_imm (void)
14428 if (!inst
.operands
[2].isreg
)
14430 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14431 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14432 int imm
= inst
.operands
[2].imm
;
14434 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14435 _("immediate out of range for shift"));
14436 NEON_ENCODE (IMMED
, inst
);
14437 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
14441 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14442 struct neon_type_el et
= neon_check_type (3, rs
,
14443 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14446 /* See note in do_neon_shl_imm. */
14447 tmp
= inst
.operands
[2].reg
;
14448 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14449 inst
.operands
[1].reg
= tmp
;
14450 NEON_ENCODE (INTEGER
, inst
);
14451 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14456 do_neon_rshl (void)
14458 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14459 struct neon_type_el et
= neon_check_type (3, rs
,
14460 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14463 tmp
= inst
.operands
[2].reg
;
14464 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14465 inst
.operands
[1].reg
= tmp
;
14466 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14470 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
14472 /* Handle .I8 pseudo-instructions. */
14475 /* Unfortunately, this will make everything apart from zero out-of-range.
14476 FIXME is this the intended semantics? There doesn't seem much point in
14477 accepting .I8 if so. */
14478 immediate
|= immediate
<< 8;
14484 if (immediate
== (immediate
& 0x000000ff))
14486 *immbits
= immediate
;
14489 else if (immediate
== (immediate
& 0x0000ff00))
14491 *immbits
= immediate
>> 8;
14494 else if (immediate
== (immediate
& 0x00ff0000))
14496 *immbits
= immediate
>> 16;
14499 else if (immediate
== (immediate
& 0xff000000))
14501 *immbits
= immediate
>> 24;
14504 if ((immediate
& 0xffff) != (immediate
>> 16))
14505 goto bad_immediate
;
14506 immediate
&= 0xffff;
14509 if (immediate
== (immediate
& 0x000000ff))
14511 *immbits
= immediate
;
14514 else if (immediate
== (immediate
& 0x0000ff00))
14516 *immbits
= immediate
>> 8;
14521 first_error (_("immediate value out of range"));
14526 do_neon_logic (void)
14528 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
14530 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14531 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14532 /* U bit and size field were set as part of the bitmask. */
14533 NEON_ENCODE (INTEGER
, inst
);
14534 neon_three_same (neon_quad (rs
), 0, -1);
14538 const int three_ops_form
= (inst
.operands
[2].present
14539 && !inst
.operands
[2].isreg
);
14540 const int immoperand
= (three_ops_form
? 2 : 1);
14541 enum neon_shape rs
= (three_ops_form
14542 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
14543 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
14544 struct neon_type_el et
= neon_check_type (2, rs
,
14545 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14546 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
14550 if (et
.type
== NT_invtype
)
14553 if (three_ops_form
)
14554 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14555 _("first and second operands shall be the same register"));
14557 NEON_ENCODE (IMMED
, inst
);
14559 immbits
= inst
.operands
[immoperand
].imm
;
14562 /* .i64 is a pseudo-op, so the immediate must be a repeating
14564 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
14565 inst
.operands
[immoperand
].reg
: 0))
14567 /* Set immbits to an invalid constant. */
14568 immbits
= 0xdeadbeef;
14575 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14579 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14583 /* Pseudo-instruction for VBIC. */
14584 neon_invert_size (&immbits
, 0, et
.size
);
14585 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14589 /* Pseudo-instruction for VORR. */
14590 neon_invert_size (&immbits
, 0, et
.size
);
14591 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14601 inst
.instruction
|= neon_quad (rs
) << 6;
14602 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14603 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14604 inst
.instruction
|= cmode
<< 8;
14605 neon_write_immbits (immbits
);
14607 neon_dp_fixup (&inst
);
14612 do_neon_bitfield (void)
14614 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14615 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14616 neon_three_same (neon_quad (rs
), 0, -1);
14620 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
14623 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14624 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
14626 if (et
.type
== NT_float
)
14628 NEON_ENCODE (FLOAT
, inst
);
14629 neon_three_same (neon_quad (rs
), 0, -1);
14633 NEON_ENCODE (INTEGER
, inst
);
14634 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
14639 do_neon_dyadic_if_su (void)
14641 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14645 do_neon_dyadic_if_su_d (void)
14647 /* This version only allow D registers, but that constraint is enforced during
14648 operand parsing so we don't need to do anything extra here. */
14649 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14653 do_neon_dyadic_if_i_d (void)
14655 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14656 affected if we specify unsigned args. */
14657 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14660 enum vfp_or_neon_is_neon_bits
14663 NEON_CHECK_ARCH
= 2,
14664 NEON_CHECK_ARCH8
= 4
14667 /* Call this function if an instruction which may have belonged to the VFP or
14668 Neon instruction sets, but turned out to be a Neon instruction (due to the
14669 operand types involved, etc.). We have to check and/or fix-up a couple of
14672 - Make sure the user hasn't attempted to make a Neon instruction
14674 - Alter the value in the condition code field if necessary.
14675 - Make sure that the arch supports Neon instructions.
14677 Which of these operations take place depends on bits from enum
14678 vfp_or_neon_is_neon_bits.
14680 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14681 current instruction's condition is COND_ALWAYS, the condition field is
14682 changed to inst.uncond_value. This is necessary because instructions shared
14683 between VFP and Neon may be conditional for the VFP variants only, and the
14684 unconditional Neon version must have, e.g., 0xF in the condition field. */
14687 vfp_or_neon_is_neon (unsigned check
)
14689 /* Conditions are always legal in Thumb mode (IT blocks). */
14690 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
14692 if (inst
.cond
!= COND_ALWAYS
)
14694 first_error (_(BAD_COND
));
14697 if (inst
.uncond_value
!= -1)
14698 inst
.instruction
|= inst
.uncond_value
<< 28;
14701 if ((check
& NEON_CHECK_ARCH
)
14702 && !mark_feature_used (&fpu_neon_ext_v1
))
14704 first_error (_(BAD_FPU
));
14708 if ((check
& NEON_CHECK_ARCH8
)
14709 && !mark_feature_used (&fpu_neon_ext_armv8
))
14711 first_error (_(BAD_FPU
));
14719 do_neon_addsub_if_i (void)
14721 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
14724 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14727 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14728 affected if we specify unsigned args. */
14729 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
14732 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14734 V<op> A,B (A is operand 0, B is operand 2)
14739 so handle that case specially. */
14742 neon_exchange_operands (void)
14744 void *scratch
= alloca (sizeof (inst
.operands
[0]));
14745 if (inst
.operands
[1].present
)
14747 /* Swap operands[1] and operands[2]. */
14748 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
14749 inst
.operands
[1] = inst
.operands
[2];
14750 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
14754 inst
.operands
[1] = inst
.operands
[2];
14755 inst
.operands
[2] = inst
.operands
[0];
14760 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
14762 if (inst
.operands
[2].isreg
)
14765 neon_exchange_operands ();
14766 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
14770 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14771 struct neon_type_el et
= neon_check_type (2, rs
,
14772 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
14774 NEON_ENCODE (IMMED
, inst
);
14775 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14776 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14777 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14778 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14779 inst
.instruction
|= neon_quad (rs
) << 6;
14780 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14781 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14783 neon_dp_fixup (&inst
);
14790 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
14794 do_neon_cmp_inv (void)
14796 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
14802 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
14837 /* Encode multiply / multiply-accumulate scalar instructions. */
14840 neon_mul_mac (struct neon_type_el et
, int ubit
)
14844 /* Give a more helpful error message if we have an invalid type. */
14845 if (et
.type
== NT_invtype
)
14848 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
14849 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14850 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14851 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14852 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14853 inst
.instruction
|= LOW4 (scalar
);
14854 inst
.instruction
|= HI1 (scalar
) << 5;
14855 inst
.instruction
|= (et
.type
== NT_float
) << 8;
14856 inst
.instruction
|= neon_logbits (et
.size
) << 20;
14857 inst
.instruction
|= (ubit
!= 0) << 24;
14859 neon_dp_fixup (&inst
);
14863 do_neon_mac_maybe_scalar (void)
14865 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
14868 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14871 if (inst
.operands
[2].isscalar
)
14873 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
14874 struct neon_type_el et
= neon_check_type (3, rs
,
14875 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
14876 NEON_ENCODE (SCALAR
, inst
);
14877 neon_mul_mac (et
, neon_quad (rs
));
14881 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14882 affected if we specify unsigned args. */
14883 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14888 do_neon_fmac (void)
14890 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
14893 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14896 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14902 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14903 struct neon_type_el et
= neon_check_type (3, rs
,
14904 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14905 neon_three_same (neon_quad (rs
), 0, et
.size
);
14908 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14909 same types as the MAC equivalents. The polynomial type for this instruction
14910 is encoded the same as the integer type. */
14915 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
14918 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14921 if (inst
.operands
[2].isscalar
)
14922 do_neon_mac_maybe_scalar ();
14924 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
14928 do_neon_qdmulh (void)
14930 if (inst
.operands
[2].isscalar
)
14932 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
14933 struct neon_type_el et
= neon_check_type (3, rs
,
14934 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
14935 NEON_ENCODE (SCALAR
, inst
);
14936 neon_mul_mac (et
, neon_quad (rs
));
14940 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14941 struct neon_type_el et
= neon_check_type (3, rs
,
14942 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
14943 NEON_ENCODE (INTEGER
, inst
);
14944 /* The U bit (rounding) comes from bit mask. */
14945 neon_three_same (neon_quad (rs
), 0, et
.size
);
14950 do_neon_fcmp_absolute (void)
14952 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14953 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
14954 /* Size field comes from bit mask. */
14955 neon_three_same (neon_quad (rs
), 1, -1);
/* Encode VACLE/VACLT by swapping operands of the inverse compare.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
14966 do_neon_step (void)
14968 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14969 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
14970 neon_three_same (neon_quad (rs
), 0, -1);
14974 do_neon_abs_neg (void)
14976 enum neon_shape rs
;
14977 struct neon_type_el et
;
14979 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
14982 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14985 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14986 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
14988 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14989 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14990 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14991 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14992 inst
.instruction
|= neon_quad (rs
) << 6;
14993 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14994 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14996 neon_dp_fixup (&inst
);
15002 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15003 struct neon_type_el et
= neon_check_type (2, rs
,
15004 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15005 int imm
= inst
.operands
[2].imm
;
15006 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15007 _("immediate out of range for insert"));
15008 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15014 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15015 struct neon_type_el et
= neon_check_type (2, rs
,
15016 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15017 int imm
= inst
.operands
[2].imm
;
15018 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15019 _("immediate out of range for insert"));
15020 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
15024 do_neon_qshlu_imm (void)
15026 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15027 struct neon_type_el et
= neon_check_type (2, rs
,
15028 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
15029 int imm
= inst
.operands
[2].imm
;
15030 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15031 _("immediate out of range for shift"));
15032 /* Only encodes the 'U present' variant of the instruction.
15033 In this case, signed types have OP (bit 8) set to 0.
15034 Unsigned types have OP set to 1. */
15035 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
15036 /* The rest of the bits are the same as other immediate shifts. */
15037 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15041 do_neon_qmovn (void)
15043 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15044 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15045 /* Saturating move where operands can be signed or unsigned, and the
15046 destination has the same signedness. */
15047 NEON_ENCODE (INTEGER
, inst
);
15048 if (et
.type
== NT_unsigned
)
15049 inst
.instruction
|= 0xc0;
15051 inst
.instruction
|= 0x80;
15052 neon_two_same (0, 1, et
.size
/ 2);
15056 do_neon_qmovun (void)
15058 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15059 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15060 /* Saturating move with unsigned results. Operands must be signed. */
15061 NEON_ENCODE (INTEGER
, inst
);
15062 neon_two_same (0, 1, et
.size
/ 2);
15066 do_neon_rshift_sat_narrow (void)
15068 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15069 or unsigned. If operands are unsigned, results must also be unsigned. */
15070 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15071 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15072 int imm
= inst
.operands
[2].imm
;
15073 /* This gets the bounds check, size encoding and immediate bits calculation
15077 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15078 VQMOVN.I<size> <Dd>, <Qm>. */
15081 inst
.operands
[2].present
= 0;
15082 inst
.instruction
= N_MNEM_vqmovn
;
15087 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15088 _("immediate out of range"));
15089 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15093 do_neon_rshift_sat_narrow_u (void)
15095 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15096 or unsigned. If operands are unsigned, results must also be unsigned. */
15097 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15098 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15099 int imm
= inst
.operands
[2].imm
;
15100 /* This gets the bounds check, size encoding and immediate bits calculation
15104 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15105 VQMOVUN.I<size> <Dd>, <Qm>. */
15108 inst
.operands
[2].present
= 0;
15109 inst
.instruction
= N_MNEM_vqmovun
;
15114 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15115 _("immediate out of range"));
15116 /* FIXME: The manual is kind of unclear about what value U should have in
15117 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15119 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15123 do_neon_movn (void)
15125 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15126 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15127 NEON_ENCODE (INTEGER
, inst
);
15128 neon_two_same (0, 1, et
.size
/ 2);
15132 do_neon_rshift_narrow (void)
15134 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15135 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15136 int imm
= inst
.operands
[2].imm
;
15137 /* This gets the bounds check, size encoding and immediate bits calculation
15141 /* If immediate is zero then we are a pseudo-instruction for
15142 VMOVN.I<size> <Dd>, <Qm> */
15145 inst
.operands
[2].present
= 0;
15146 inst
.instruction
= N_MNEM_vmovn
;
15151 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15152 _("immediate out of range for narrowing operation"));
15153 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15157 do_neon_shll (void)
15159 /* FIXME: Type checking when lengthening. */
15160 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15161 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15162 unsigned imm
= inst
.operands
[2].imm
;
15164 if (imm
== et
.size
)
15166 /* Maximum shift variant. */
15167 NEON_ENCODE (INTEGER
, inst
);
15168 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15169 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15170 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15171 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15172 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15174 neon_dp_fixup (&inst
);
15178 /* A more-specific type check for non-max versions. */
15179 et
= neon_check_type (2, NS_QDI
,
15180 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15181 NEON_ENCODE (IMMED
, inst
);
15182 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)

#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15227 static enum neon_cvt_flavour
15228 get_neon_cvt_flavour (enum neon_shape rs
)
15230 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
15231 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
15232 if (et.type != NT_invtype) \
15234 inst.error = NULL; \
15235 return (neon_cvt_flavour_##C); \
15238 struct neon_type_el et
;
15239 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
15240 || rs
== NS_FF
) ? N_VFP
: 0;
15241 /* The instruction versions which take an immediate take one register
15242 argument, which is extended to the width of the full register. Thus the
15243 "source" and "destination" registers must have the same width. Hack that
15244 here by making the size equal to the key (wider, in this case) operand. */
15245 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
15249 return neon_cvt_flavour_invalid
;
15264 /* Neon-syntax VFP conversions. */
15267 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
15269 const char *opname
= 0;
15271 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
15272 || rs
== NS_FHI
|| rs
== NS_HFI
)
15274 /* Conversions with immediate bitshift. */
15275 const char *enc
[] =
15277 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15283 if (flavour
< (int) ARRAY_SIZE (enc
))
15285 opname
= enc
[flavour
];
15286 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15287 _("operands 0 and 1 must be the same register"));
15288 inst
.operands
[1] = inst
.operands
[2];
15289 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
15294 /* Conversions without bitshift. */
15295 const char *enc
[] =
15297 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15303 if (flavour
< (int) ARRAY_SIZE (enc
))
15304 opname
= enc
[flavour
];
15308 do_vfp_nsyn_opcode (opname
);
15312 do_vfp_nsyn_cvtz (void)
15314 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
15315 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15316 const char *enc
[] =
15318 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15324 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
15325 do_vfp_nsyn_opcode (enc
[flavour
]);
15329 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
15330 enum neon_cvt_mode mode
)
15335 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15336 D register operands. */
15337 if (flavour
== neon_cvt_flavour_s32_f64
15338 || flavour
== neon_cvt_flavour_u32_f64
)
15339 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15342 set_it_insn_type (OUTSIDE_IT_INSN
);
15346 case neon_cvt_flavour_s32_f64
:
15350 case neon_cvt_flavour_s32_f32
:
15354 case neon_cvt_flavour_u32_f64
:
15358 case neon_cvt_flavour_u32_f32
:
15363 first_error (_("invalid instruction shape"));
15369 case neon_cvt_mode_a
: rm
= 0; break;
15370 case neon_cvt_mode_n
: rm
= 1; break;
15371 case neon_cvt_mode_p
: rm
= 2; break;
15372 case neon_cvt_mode_m
: rm
= 3; break;
15373 default: first_error (_("invalid rounding mode")); return;
15376 NEON_ENCODE (FPV8
, inst
);
15377 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
15378 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
15379 inst
.instruction
|= sz
<< 8;
15380 inst
.instruction
|= op
<< 7;
15381 inst
.instruction
|= rm
<< 16;
15382 inst
.instruction
|= 0xf0000000;
15383 inst
.is_neon
= TRUE
;
15387 do_neon_cvt_1 (enum neon_cvt_mode mode
)
15389 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
15390 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
15391 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
15393 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15395 /* PR11109: Handle round-to-zero for VCVT conversions. */
15396 if (mode
== neon_cvt_mode_z
15397 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
15398 && (flavour
== neon_cvt_flavour_s32_f32
15399 || flavour
== neon_cvt_flavour_u32_f32
15400 || flavour
== neon_cvt_flavour_s32_f64
15401 || flavour
== neon_cvt_flavour_u32_f64
)
15402 && (rs
== NS_FD
|| rs
== NS_FF
))
15404 do_vfp_nsyn_cvtz ();
15408 /* VFP rather than Neon conversions. */
15409 if (flavour
>= neon_cvt_flavour_first_fp
)
15411 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15412 do_vfp_nsyn_cvt (rs
, flavour
);
15414 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15425 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
15427 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15430 /* Fixed-point conversion with #0 immediate is encoded as an
15431 integer conversion. */
15432 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
15434 immbits
= 32 - inst
.operands
[2].imm
;
15435 NEON_ENCODE (IMMED
, inst
);
15436 if (flavour
!= neon_cvt_flavour_invalid
)
15437 inst
.instruction
|= enctab
[flavour
];
15438 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15439 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15440 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15441 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15442 inst
.instruction
|= neon_quad (rs
) << 6;
15443 inst
.instruction
|= 1 << 21;
15444 inst
.instruction
|= immbits
<< 16;
15446 neon_dp_fixup (&inst
);
15452 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
15454 NEON_ENCODE (FLOAT
, inst
);
15455 set_it_insn_type (OUTSIDE_IT_INSN
);
15457 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
15460 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15461 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15462 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15463 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15464 inst
.instruction
|= neon_quad (rs
) << 6;
15465 inst
.instruction
|= (flavour
== neon_cvt_flavour_u32_f32
) << 7;
15466 inst
.instruction
|= mode
<< 8;
15468 inst
.instruction
|= 0xfc000000;
15470 inst
.instruction
|= 0xf0000000;
15476 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
15478 NEON_ENCODE (INTEGER
, inst
);
15480 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15483 if (flavour
!= neon_cvt_flavour_invalid
)
15484 inst
.instruction
|= enctab
[flavour
];
15486 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15487 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15488 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15489 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15490 inst
.instruction
|= neon_quad (rs
) << 6;
15491 inst
.instruction
|= 2 << 18;
15493 neon_dp_fixup (&inst
);
15498 /* Half-precision conversions for Advanced SIMD -- neon. */
15503 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
15505 as_bad (_("operand size must match register width"));
15510 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
15512 as_bad (_("operand size must match register width"));
15517 inst
.instruction
= 0x3b60600;
15519 inst
.instruction
= 0x3b60700;
15521 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15522 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15523 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15524 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15525 neon_dp_fixup (&inst
);
15529 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15530 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15531 do_vfp_nsyn_cvt (rs
, flavour
);
15533 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15538 do_neon_cvtr (void)
15540 do_neon_cvt_1 (neon_cvt_mode_x
);
15546 do_neon_cvt_1 (neon_cvt_mode_z
);
15550 do_neon_cvta (void)
15552 do_neon_cvt_1 (neon_cvt_mode_a
);
15556 do_neon_cvtn (void)
15558 do_neon_cvt_1 (neon_cvt_mode_n
);
15562 do_neon_cvtp (void)
15564 do_neon_cvt_1 (neon_cvt_mode_p
);
15568 do_neon_cvtm (void)
15570 do_neon_cvt_1 (neon_cvt_mode_m
);
15574 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
15577 mark_feature_used (&fpu_vfp_ext_armv8
);
15579 encode_arm_vfp_reg (inst
.operands
[0].reg
,
15580 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
15581 encode_arm_vfp_reg (inst
.operands
[1].reg
,
15582 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
15583 inst
.instruction
|= to
? 0x10000 : 0;
15584 inst
.instruction
|= t
? 0x80 : 0;
15585 inst
.instruction
|= is_double
? 0x100 : 0;
15586 do_vfp_cond_or_thumb ();
15590 do_neon_cvttb_1 (bfd_boolean t
)
15592 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
15593 NS_DF
, NS_DH
, NS_NULL
);
15597 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
15600 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
15602 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
15605 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
15607 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
15609 /* The VCVTB and VCVTT instructions with D-register operands
15610 don't work for SP only targets. */
15611 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15615 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
15617 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
15619 /* The VCVTB and VCVTT instructions with D-register operands
15620 don't work for SP only targets. */
15621 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15625 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
15632 do_neon_cvtb (void)
15634 do_neon_cvttb_1 (FALSE
);
15639 do_neon_cvtt (void)
15641 do_neon_cvttb_1 (TRUE
);
15645 neon_move_immediate (void)
15647 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
15648 struct neon_type_el et
= neon_check_type (2, rs
,
15649 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15650 unsigned immlo
, immhi
= 0, immbits
;
15651 int op
, cmode
, float_p
;
15653 constraint (et
.type
== NT_invtype
,
15654 _("operand size must be specified for immediate VMOV"));
15656 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
15657 op
= (inst
.instruction
& (1 << 5)) != 0;
15659 immlo
= inst
.operands
[1].imm
;
15660 if (inst
.operands
[1].regisimm
)
15661 immhi
= inst
.operands
[1].reg
;
15663 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
15664 _("immediate has bits set outside the operand size"));
15666 float_p
= inst
.operands
[1].immisfloat
;
15668 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
15669 et
.size
, et
.type
)) == FAIL
)
15671 /* Invert relevant bits only. */
15672 neon_invert_size (&immlo
, &immhi
, et
.size
);
15673 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15674 with one or the other; those cases are caught by
15675 neon_cmode_for_move_imm. */
15677 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
15678 &op
, et
.size
, et
.type
)) == FAIL
)
15680 first_error (_("immediate out of range"));
15685 inst
.instruction
&= ~(1 << 5);
15686 inst
.instruction
|= op
<< 5;
15688 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15689 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15690 inst
.instruction
|= neon_quad (rs
) << 6;
15691 inst
.instruction
|= cmode
<< 8;
15693 neon_write_immbits (immbits
);
15699 if (inst
.operands
[1].isreg
)
15701 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15703 NEON_ENCODE (INTEGER
, inst
);
15704 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15705 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15706 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15707 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15708 inst
.instruction
|= neon_quad (rs
) << 6;
15712 NEON_ENCODE (IMMED
, inst
);
15713 neon_move_immediate ();
15716 neon_dp_fixup (&inst
);
15719 /* Encode instructions of form:
15721 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15722 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15725 neon_mixed_length (struct neon_type_el et
, unsigned size
)
15727 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15728 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15729 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15730 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15731 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15732 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15733 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
15734 inst
.instruction
|= neon_logbits (size
) << 20;
15736 neon_dp_fixup (&inst
);
15740 do_neon_dyadic_long (void)
15742 /* FIXME: Type checking for lengthening op. */
15743 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15744 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15745 neon_mixed_length (et
, et
.size
);
15749 do_neon_abal (void)
15751 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15752 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15753 neon_mixed_length (et
, et
.size
);
15757 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
15759 if (inst
.operands
[2].isscalar
)
15761 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
15762 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
15763 NEON_ENCODE (SCALAR
, inst
);
15764 neon_mul_mac (et
, et
.type
== NT_unsigned
);
15768 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15769 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
15770 NEON_ENCODE (INTEGER
, inst
);
15771 neon_mixed_length (et
, et
.size
);
15776 do_neon_mac_maybe_scalar_long (void)
15778 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
15782 do_neon_dyadic_wide (void)
15784 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
15785 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15786 neon_mixed_length (et
, et
.size
);
15790 do_neon_dyadic_narrow (void)
15792 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15793 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
15794 /* Operand sign is unimportant, and the U bit is part of the opcode,
15795 so force the operand type to integer. */
15796 et
.type
= NT_integer
;
15797 neon_mixed_length (et
, et
.size
/ 2);
15801 do_neon_mul_sat_scalar_long (void)
15803 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
15807 do_neon_vmull (void)
15809 if (inst
.operands
[2].isscalar
)
15810 do_neon_mac_maybe_scalar_long ();
15813 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15814 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
15816 if (et
.type
== NT_poly
)
15817 NEON_ENCODE (POLY
, inst
);
15819 NEON_ENCODE (INTEGER
, inst
);
15821 /* For polynomial encoding the U bit must be zero, and the size must
15822 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
15823 obviously, as 0b10). */
15826 /* Check we're on the correct architecture. */
15827 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
15829 _("Instruction form not available on this architecture.");
15834 neon_mixed_length (et
, et
.size
);
15841 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
15842 struct neon_type_el et
= neon_check_type (3, rs
,
15843 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15844 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
15846 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
15847 _("shift out of range"));
15848 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15849 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15850 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15851 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15852 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15853 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15854 inst
.instruction
|= neon_quad (rs
) << 6;
15855 inst
.instruction
|= imm
<< 8;
15857 neon_dp_fixup (&inst
);
15863 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15864 struct neon_type_el et
= neon_check_type (2, rs
,
15865 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15866 unsigned op
= (inst
.instruction
>> 7) & 3;
15867 /* N (width of reversed regions) is encoded as part of the bitmask. We
15868 extract it here to check the elements to be reversed are smaller.
15869 Otherwise we'd get a reserved instruction. */
15870 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
15871 gas_assert (elsize
!= 0);
15872 constraint (et
.size
>= elsize
,
15873 _("elements must be smaller than reversal region"));
15874 neon_two_same (neon_quad (rs
), 1, et
.size
);
15880 if (inst
.operands
[1].isscalar
)
15882 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
15883 struct neon_type_el et
= neon_check_type (2, rs
,
15884 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15885 unsigned sizebits
= et
.size
>> 3;
15886 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
15887 int logsize
= neon_logbits (et
.size
);
15888 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
15890 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
15893 NEON_ENCODE (SCALAR
, inst
);
15894 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15895 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15896 inst
.instruction
|= LOW4 (dm
);
15897 inst
.instruction
|= HI1 (dm
) << 5;
15898 inst
.instruction
|= neon_quad (rs
) << 6;
15899 inst
.instruction
|= x
<< 17;
15900 inst
.instruction
|= sizebits
<< 16;
15902 neon_dp_fixup (&inst
);
15906 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
15907 struct neon_type_el et
= neon_check_type (2, rs
,
15908 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
15909 /* Duplicate ARM register to lanes of vector. */
15910 NEON_ENCODE (ARMREG
, inst
);
15913 case 8: inst
.instruction
|= 0x400000; break;
15914 case 16: inst
.instruction
|= 0x000020; break;
15915 case 32: inst
.instruction
|= 0x000000; break;
15918 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
15919 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
15920 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
15921 inst
.instruction
|= neon_quad (rs
) << 21;
15922 /* The encoding for this instruction is identical for the ARM and Thumb
15923 variants, except for the condition field. */
15924 do_vfp_cond_or_thumb ();
15928 /* VMOV has particularly many variations. It can be one of:
15929 0. VMOV<c><q> <Qd>, <Qm>
15930 1. VMOV<c><q> <Dd>, <Dm>
15931 (Register operations, which are VORR with Rm = Rn.)
15932 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15933 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15935 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15936 (ARM register to scalar.)
15937 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15938 (Two ARM registers to vector.)
15939 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15940 (Scalar to ARM register.)
15941 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15942 (Vector to two ARM registers.)
15943 8. VMOV.F32 <Sd>, <Sm>
15944 9. VMOV.F64 <Dd>, <Dm>
15945 (VFP register moves.)
15946 10. VMOV.F32 <Sd>, #imm
15947 11. VMOV.F64 <Dd>, #imm
15948 (VFP float immediate load.)
15949 12. VMOV <Rd>, <Sm>
15950 (VFP single to ARM reg.)
15951 13. VMOV <Sd>, <Rm>
15952 (ARM reg to VFP single.)
15953 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15954 (Two ARM regs to two VFP singles.)
15955 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15956 (Two VFP singles to two ARM regs.)
15958 These cases can be disambiguated using neon_select_shape, except cases 1/9
15959 and 3/11 which depend on the operand type too.
15961 All the encoded bits are hardcoded by this function.
15963 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15964 Cases 5, 7 may be used with VFPv2 and above.
15966 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15967 can specify a type where it doesn't make sense to, and is ignored). */
15972 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
15973 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
15975 struct neon_type_el et
;
15976 const char *ldconst
= 0;
15980 case NS_DD
: /* case 1/9. */
15981 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
15982 /* It is not an error here if no type is given. */
15984 if (et
.type
== NT_float
&& et
.size
== 64)
15986 do_vfp_nsyn_opcode ("fcpyd");
15989 /* fall through. */
15991 case NS_QQ
: /* case 0/1. */
15993 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15995 /* The architecture manual I have doesn't explicitly state which
15996 value the U bit should have for register->register moves, but
15997 the equivalent VORR instruction has U = 0, so do that. */
15998 inst
.instruction
= 0x0200110;
15999 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16000 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16001 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16002 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16003 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16004 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16005 inst
.instruction
|= neon_quad (rs
) << 6;
16007 neon_dp_fixup (&inst
);
16011 case NS_DI
: /* case 3/11. */
16012 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16014 if (et
.type
== NT_float
&& et
.size
== 64)
16016 /* case 11 (fconstd). */
16017 ldconst
= "fconstd";
16018 goto encode_fconstd
;
16020 /* fall through. */
16022 case NS_QI
: /* case 2/3. */
16023 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16025 inst
.instruction
= 0x0800010;
16026 neon_move_immediate ();
16027 neon_dp_fixup (&inst
);
16030 case NS_SR
: /* case 4. */
16032 unsigned bcdebits
= 0;
16034 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
16035 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
16037 /* .<size> is optional here, defaulting to .32. */
16038 if (inst
.vectype
.elems
== 0
16039 && inst
.operands
[0].vectype
.type
== NT_invtype
16040 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16042 inst
.vectype
.el
[0].type
= NT_untyped
;
16043 inst
.vectype
.el
[0].size
= 32;
16044 inst
.vectype
.elems
= 1;
16047 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16048 logsize
= neon_logbits (et
.size
);
16050 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16052 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16053 && et
.size
!= 32, _(BAD_FPU
));
16054 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16055 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16059 case 8: bcdebits
= 0x8; break;
16060 case 16: bcdebits
= 0x1; break;
16061 case 32: bcdebits
= 0x0; break;
16065 bcdebits
|= x
<< logsize
;
16067 inst
.instruction
= 0xe000b10;
16068 do_vfp_cond_or_thumb ();
16069 inst
.instruction
|= LOW4 (dn
) << 16;
16070 inst
.instruction
|= HI1 (dn
) << 7;
16071 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16072 inst
.instruction
|= (bcdebits
& 3) << 5;
16073 inst
.instruction
|= (bcdebits
>> 2) << 21;
16077 case NS_DRR
: /* case 5 (fmdrr). */
16078 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16081 inst
.instruction
= 0xc400b10;
16082 do_vfp_cond_or_thumb ();
16083 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
16084 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
16085 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16086 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
16089 case NS_RS
: /* case 6. */
16092 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16093 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
16094 unsigned abcdebits
= 0;
16096 /* .<dt> is optional here, defaulting to .32. */
16097 if (inst
.vectype
.elems
== 0
16098 && inst
.operands
[0].vectype
.type
== NT_invtype
16099 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16101 inst
.vectype
.el
[0].type
= NT_untyped
;
16102 inst
.vectype
.el
[0].size
= 32;
16103 inst
.vectype
.elems
= 1;
16106 et
= neon_check_type (2, NS_NULL
,
16107 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
16108 logsize
= neon_logbits (et
.size
);
16110 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16112 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16113 && et
.size
!= 32, _(BAD_FPU
));
16114 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16115 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16119 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
16120 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
16121 case 32: abcdebits
= 0x00; break;
16125 abcdebits
|= x
<< logsize
;
16126 inst
.instruction
= 0xe100b10;
16127 do_vfp_cond_or_thumb ();
16128 inst
.instruction
|= LOW4 (dn
) << 16;
16129 inst
.instruction
|= HI1 (dn
) << 7;
16130 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16131 inst
.instruction
|= (abcdebits
& 3) << 5;
16132 inst
.instruction
|= (abcdebits
>> 2) << 21;
16136 case NS_RRD
: /* case 7 (fmrrd). */
16137 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16140 inst
.instruction
= 0xc500b10;
16141 do_vfp_cond_or_thumb ();
16142 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16143 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16144 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16145 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16148 case NS_FF
: /* case 8 (fcpys). */
16149 do_vfp_nsyn_opcode ("fcpys");
16152 case NS_FI
: /* case 10 (fconsts). */
16153 ldconst
= "fconsts";
16155 if (is_quarter_float (inst
.operands
[1].imm
))
16157 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
16158 do_vfp_nsyn_opcode (ldconst
);
16161 first_error (_("immediate out of range"));
16164 case NS_RF
: /* case 12 (fmrs). */
16165 do_vfp_nsyn_opcode ("fmrs");
16168 case NS_FR
: /* case 13 (fmsr). */
16169 do_vfp_nsyn_opcode ("fmsr");
16172 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16173 (one of which is a list), but we have parsed four. Do some fiddling to
16174 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16176 case NS_RRFF
: /* case 14 (fmrrs). */
16177 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
16178 _("VFP registers must be adjacent"));
16179 inst
.operands
[2].imm
= 2;
16180 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16181 do_vfp_nsyn_opcode ("fmrrs");
16184 case NS_FFRR
: /* case 15 (fmsrr). */
16185 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
16186 _("VFP registers must be adjacent"));
16187 inst
.operands
[1] = inst
.operands
[2];
16188 inst
.operands
[2] = inst
.operands
[3];
16189 inst
.operands
[0].imm
= 2;
16190 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16191 do_vfp_nsyn_opcode ("fmsrr");
16195 /* neon_select_shape has determined that the instruction
16196 shape is wrong and has already set the error message. */
16205 do_neon_rshift_round_imm (void)
16207 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16208 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
16209 int imm
= inst
.operands
[2].imm
;
16211 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16214 inst
.operands
[2].present
= 0;
16219 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16220 _("immediate out of range for shift"));
16221 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
16226 do_neon_movl (void)
16228 struct neon_type_el et
= neon_check_type (2, NS_QD
,
16229 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16230 unsigned sizebits
= et
.size
>> 3;
16231 inst
.instruction
|= sizebits
<< 19;
16232 neon_two_same (0, et
.type
== NT_unsigned
, -1);
16238 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16239 struct neon_type_el et
= neon_check_type (2, rs
,
16240 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16241 NEON_ENCODE (INTEGER
, inst
);
16242 neon_two_same (neon_quad (rs
), 1, et
.size
);
16246 do_neon_zip_uzp (void)
16248 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16249 struct neon_type_el et
= neon_check_type (2, rs
,
16250 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16251 if (rs
== NS_DD
&& et
.size
== 32)
16253 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16254 inst
.instruction
= N_MNEM_vtrn
;
16258 neon_two_same (neon_quad (rs
), 1, et
.size
);
16262 do_neon_sat_abs_neg (void)
16264 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16265 struct neon_type_el et
= neon_check_type (2, rs
,
16266 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16267 neon_two_same (neon_quad (rs
), 1, et
.size
);
16271 do_neon_pair_long (void)
16273 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16274 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
16275 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16276 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
16277 neon_two_same (neon_quad (rs
), 1, et
.size
);
16281 do_neon_recip_est (void)
16283 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16284 struct neon_type_el et
= neon_check_type (2, rs
,
16285 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
16286 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16287 neon_two_same (neon_quad (rs
), 1, et
.size
);
16293 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16294 struct neon_type_el et
= neon_check_type (2, rs
,
16295 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16296 neon_two_same (neon_quad (rs
), 1, et
.size
);
16302 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16303 struct neon_type_el et
= neon_check_type (2, rs
,
16304 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
16305 neon_two_same (neon_quad (rs
), 1, et
.size
);
16311 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16312 struct neon_type_el et
= neon_check_type (2, rs
,
16313 N_EQK
| N_INT
, N_8
| N_KEY
);
16314 neon_two_same (neon_quad (rs
), 1, et
.size
);
16320 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16321 neon_two_same (neon_quad (rs
), 1, -1);
16325 do_neon_tbl_tbx (void)
16327 unsigned listlenbits
;
16328 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
16330 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
16332 first_error (_("bad list length for table lookup"));
16336 listlenbits
= inst
.operands
[1].imm
- 1;
16337 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16338 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16339 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16340 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16341 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16342 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16343 inst
.instruction
|= listlenbits
<< 8;
16345 neon_dp_fixup (&inst
);
16349 do_neon_ldm_stm (void)
16351 /* P, U and L bits are part of bitmask. */
16352 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
16353 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
16355 if (inst
.operands
[1].issingle
)
16357 do_vfp_nsyn_ldm_stm (is_dbmode
);
16361 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
16362 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16364 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
16365 _("register list must contain at least 1 and at most 16 "
16368 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
16369 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
16370 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16371 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
16373 inst
.instruction
|= offsetbits
;
16375 do_vfp_cond_or_thumb ();
16379 do_neon_ldr_str (void)
16381 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
16383 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16384 And is UNPREDICTABLE in thumb mode. */
16386 && inst
.operands
[1].reg
== REG_PC
16387 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
16390 inst
.error
= _("Use of PC here is UNPREDICTABLE");
16391 else if (warn_on_deprecated
)
16392 as_tsktsk (_("Use of PC here is deprecated"));
16395 if (inst
.operands
[0].issingle
)
16398 do_vfp_nsyn_opcode ("flds");
16400 do_vfp_nsyn_opcode ("fsts");
16405 do_vfp_nsyn_opcode ("fldd");
16407 do_vfp_nsyn_opcode ("fstd");
16411 /* "interleave" version also handles non-interleaving register VLD1/VST1
16415 do_neon_ld_st_interleave (void)
16417 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
16418 N_8
| N_16
| N_32
| N_64
);
16419 unsigned alignbits
= 0;
16421 /* The bits in this table go:
16422 0: register stride of one (0) or two (1)
16423 1,2: register list length, minus one (1, 2, 3, 4).
16424 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16425 We use -1 for invalid entries. */
16426 const int typetable
[] =
16428 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16429 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16430 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16431 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16435 if (et
.type
== NT_invtype
)
16438 if (inst
.operands
[1].immisalign
)
16439 switch (inst
.operands
[1].imm
>> 8)
16441 case 64: alignbits
= 1; break;
16443 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
16444 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16445 goto bad_alignment
;
16449 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16450 goto bad_alignment
;
16455 first_error (_("bad alignment"));
16459 inst
.instruction
|= alignbits
<< 4;
16460 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16462 /* Bits [4:6] of the immediate in a list specifier encode register stride
16463 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16464 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16465 up the right value for "type" in a table based on this value and the given
16466 list style, then stick it back. */
16467 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
16468 | (((inst
.instruction
>> 8) & 3) << 3);
16470 typebits
= typetable
[idx
];
16472 constraint (typebits
== -1, _("bad list type for instruction"));
16473 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
16474 _("bad element type for instruction"));
16476 inst
.instruction
&= ~0xf00;
16477 inst
.instruction
|= typebits
<< 8;
16480 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16481 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16482 otherwise. The variable arguments are a list of pairs of legal (size, align)
16483 values, terminated with -1. */
16486 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
16489 int result
= FAIL
, thissize
, thisalign
;
16491 if (!inst
.operands
[1].immisalign
)
16497 va_start (ap
, do_align
);
16501 thissize
= va_arg (ap
, int);
16502 if (thissize
== -1)
16504 thisalign
= va_arg (ap
, int);
16506 if (size
== thissize
&& align
== thisalign
)
16509 while (result
!= SUCCESS
);
16513 if (result
== SUCCESS
)
16516 first_error (_("unsupported alignment for instruction"));
16522 do_neon_ld_st_lane (void)
16524 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16525 int align_good
, do_align
= 0;
16526 int logsize
= neon_logbits (et
.size
);
16527 int align
= inst
.operands
[1].imm
>> 8;
16528 int n
= (inst
.instruction
>> 8) & 3;
16529 int max_el
= 64 / et
.size
;
16531 if (et
.type
== NT_invtype
)
16534 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
16535 _("bad list length"));
16536 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
16537 _("scalar index out of range"));
16538 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
16540 _("stride of 2 unavailable when element size is 8"));
16544 case 0: /* VLD1 / VST1. */
16545 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
16547 if (align_good
== FAIL
)
16551 unsigned alignbits
= 0;
16554 case 16: alignbits
= 0x1; break;
16555 case 32: alignbits
= 0x3; break;
16558 inst
.instruction
|= alignbits
<< 4;
16562 case 1: /* VLD2 / VST2. */
16563 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
16565 if (align_good
== FAIL
)
16568 inst
.instruction
|= 1 << 4;
16571 case 2: /* VLD3 / VST3. */
16572 constraint (inst
.operands
[1].immisalign
,
16573 _("can't use alignment with this instruction"));
16576 case 3: /* VLD4 / VST4. */
16577 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
16578 16, 64, 32, 64, 32, 128, -1);
16579 if (align_good
== FAIL
)
16583 unsigned alignbits
= 0;
16586 case 8: alignbits
= 0x1; break;
16587 case 16: alignbits
= 0x1; break;
16588 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
16591 inst
.instruction
|= alignbits
<< 4;
16598 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16599 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16600 inst
.instruction
|= 1 << (4 + logsize
);
16602 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
16603 inst
.instruction
|= logsize
<< 10;
16606 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16609 do_neon_ld_dup (void)
16611 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16612 int align_good
, do_align
= 0;
16614 if (et
.type
== NT_invtype
)
16617 switch ((inst
.instruction
>> 8) & 3)
16619 case 0: /* VLD1. */
16620 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
16621 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16622 &do_align
, 16, 16, 32, 32, -1);
16623 if (align_good
== FAIL
)
16625 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
16628 case 2: inst
.instruction
|= 1 << 5; break;
16629 default: first_error (_("bad list length")); return;
16631 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16634 case 1: /* VLD2. */
16635 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16636 &do_align
, 8, 16, 16, 32, 32, 64, -1);
16637 if (align_good
== FAIL
)
16639 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
16640 _("bad list length"));
16641 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16642 inst
.instruction
|= 1 << 5;
16643 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16646 case 2: /* VLD3. */
16647 constraint (inst
.operands
[1].immisalign
,
16648 _("can't use alignment with this instruction"));
16649 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
16650 _("bad list length"));
16651 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16652 inst
.instruction
|= 1 << 5;
16653 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16656 case 3: /* VLD4. */
16658 int align
= inst
.operands
[1].imm
>> 8;
16659 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
16660 16, 64, 32, 64, 32, 128, -1);
16661 if (align_good
== FAIL
)
16663 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
16664 _("bad list length"));
16665 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16666 inst
.instruction
|= 1 << 5;
16667 if (et
.size
== 32 && align
== 128)
16668 inst
.instruction
|= 0x3 << 6;
16670 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16677 inst
.instruction
|= do_align
<< 4;
16680 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16681 apart from bits [11:4]. */
16684 do_neon_ldx_stx (void)
16686 if (inst
.operands
[1].isreg
)
16687 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
16689 switch (NEON_LANE (inst
.operands
[0].imm
))
16691 case NEON_INTERLEAVE_LANES
:
16692 NEON_ENCODE (INTERLV
, inst
);
16693 do_neon_ld_st_interleave ();
16696 case NEON_ALL_LANES
:
16697 NEON_ENCODE (DUP
, inst
);
16698 if (inst
.instruction
== N_INV
)
16700 first_error ("only loads support such operands");
16707 NEON_ENCODE (LANE
, inst
);
16708 do_neon_ld_st_lane ();
16711 /* L bit comes from bit mask. */
16712 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16713 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16714 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16716 if (inst
.operands
[1].postind
)
16718 int postreg
= inst
.operands
[1].imm
& 0xf;
16719 constraint (!inst
.operands
[1].immisreg
,
16720 _("post-index must be a register"));
16721 constraint (postreg
== 0xd || postreg
== 0xf,
16722 _("bad register for post-index"));
16723 inst
.instruction
|= postreg
;
16727 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
16728 constraint (inst
.reloc
.exp
.X_op
!= O_constant
16729 || inst
.reloc
.exp
.X_add_number
!= 0,
16732 if (inst
.operands
[1].writeback
)
16734 inst
.instruction
|= 0xd;
16737 inst
.instruction
|= 0xf;
16741 inst
.instruction
|= 0xf9000000;
16743 inst
.instruction
|= 0xf4000000;
16748 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
16750 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16751 D register operands. */
16752 if (neon_shape_class
[rs
] == SC_DOUBLE
)
16753 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16756 NEON_ENCODE (FPV8
, inst
);
16759 do_vfp_sp_dyadic ();
16761 do_vfp_dp_rd_rn_rm ();
16764 inst
.instruction
|= 0x100;
16766 inst
.instruction
|= 0xf0000000;
16772 set_it_insn_type (OUTSIDE_IT_INSN
);
16774 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
16775 first_error (_("invalid instruction shape"));
16781 set_it_insn_type (OUTSIDE_IT_INSN
);
16783 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
16786 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16789 neon_dyadic_misc (NT_untyped
, N_F32
, 0);
16793 do_vrint_1 (enum neon_cvt_mode mode
)
16795 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
16796 struct neon_type_el et
;
16801 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16802 D register operands. */
16803 if (neon_shape_class
[rs
] == SC_DOUBLE
)
16804 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16807 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
16808 if (et
.type
!= NT_invtype
)
16810 /* VFP encodings. */
16811 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
16812 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
16813 set_it_insn_type (OUTSIDE_IT_INSN
);
16815 NEON_ENCODE (FPV8
, inst
);
16817 do_vfp_sp_monadic ();
16819 do_vfp_dp_rd_rm ();
16823 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
16824 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
16825 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
16826 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
16827 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
16828 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
16829 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
16833 inst
.instruction
|= (rs
== NS_DD
) << 8;
16834 do_vfp_cond_or_thumb ();
16838 /* Neon encodings (or something broken...). */
16840 et
= neon_check_type (2, rs
, N_EQK
, N_F32
| N_KEY
);
16842 if (et
.type
== NT_invtype
)
16845 set_it_insn_type (OUTSIDE_IT_INSN
);
16846 NEON_ENCODE (FLOAT
, inst
);
16848 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16851 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16852 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16853 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16854 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16855 inst
.instruction
|= neon_quad (rs
) << 6;
16858 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
16859 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
16860 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
16861 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
16862 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
16863 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
16864 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
16869 inst
.instruction
|= 0xfc000000;
16871 inst
.instruction
|= 0xf0000000;
16878 do_vrint_1 (neon_cvt_mode_x
);
16884 do_vrint_1 (neon_cvt_mode_z
);
16890 do_vrint_1 (neon_cvt_mode_r
);
16896 do_vrint_1 (neon_cvt_mode_a
);
16902 do_vrint_1 (neon_cvt_mode_n
);
16908 do_vrint_1 (neon_cvt_mode_p
);
16914 do_vrint_1 (neon_cvt_mode_m
);
16917 /* Crypto v1 instructions. */
16919 do_crypto_2op_1 (unsigned elttype
, int op
)
16921 set_it_insn_type (OUTSIDE_IT_INSN
);
16923 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
16929 NEON_ENCODE (INTEGER
, inst
);
16930 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16931 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16932 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16933 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16935 inst
.instruction
|= op
<< 6;
16938 inst
.instruction
|= 0xfc000000;
16940 inst
.instruction
|= 0xf0000000;
16944 do_crypto_3op_1 (int u
, int op
)
16946 set_it_insn_type (OUTSIDE_IT_INSN
);
16948 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
16949 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
16954 NEON_ENCODE (INTEGER
, inst
);
16955 neon_three_same (1, u
, 8 << op
);
16961 do_crypto_2op_1 (N_8
, 0);
16967 do_crypto_2op_1 (N_8
, 1);
16973 do_crypto_2op_1 (N_8
, 2);
16979 do_crypto_2op_1 (N_8
, 3);
16985 do_crypto_3op_1 (0, 0);
16991 do_crypto_3op_1 (0, 1);
16997 do_crypto_3op_1 (0, 2);
17003 do_crypto_3op_1 (0, 3);
17009 do_crypto_3op_1 (1, 0);
17015 do_crypto_3op_1 (1, 1);
17019 do_sha256su1 (void)
17021 do_crypto_3op_1 (1, 2);
17027 do_crypto_2op_1 (N_32
, -1);
17033 do_crypto_2op_1 (N_32
, 0);
17037 do_sha256su0 (void)
17039 do_crypto_2op_1 (N_32
, 1);
17043 do_crc32_1 (unsigned int poly
, unsigned int sz
)
17045 unsigned int Rd
= inst
.operands
[0].reg
;
17046 unsigned int Rn
= inst
.operands
[1].reg
;
17047 unsigned int Rm
= inst
.operands
[2].reg
;
17049 set_it_insn_type (OUTSIDE_IT_INSN
);
17050 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
17051 inst
.instruction
|= LOW4 (Rn
) << 16;
17052 inst
.instruction
|= LOW4 (Rm
);
17053 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
17054 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
17056 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
17057 as_warn (UNPRED_REG ("r15"));
17058 if (thumb_mode
&& (Rd
== REG_SP
|| Rn
== REG_SP
|| Rm
== REG_SP
))
17059 as_warn (UNPRED_REG ("r13"));
17099 /* Overall per-instruction processing. */
17101 /* We need to be able to fix up arbitrary expressions in some statements.
17102 This is so that we can handle symbols that are an arbitrary distance from
17103 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17104 which returns part of an address in a form which will be valid for
17105 a data instruction. We do this by pushing the expression into a symbol
17106 in the expr_section, and creating a fix for that. */
17109 fix_new_arm (fragS
* frag
,
17123 /* Create an absolute valued symbol, so we have something to
17124 refer to in the object file. Unfortunately for us, gas's
17125 generic expression parsing will already have folded out
17126 any use of .set foo/.type foo %function that may have
17127 been used to set type information of the target location,
17128 that's being specified symbolically. We have to presume
17129 the user knows what they are doing. */
17133 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
17135 symbol
= symbol_find_or_make (name
);
17136 S_SET_SEGMENT (symbol
, absolute_section
);
17137 symbol_set_frag (symbol
, &zero_address_frag
);
17138 S_SET_VALUE (symbol
, exp
->X_add_number
);
17139 exp
->X_op
= O_symbol
;
17140 exp
->X_add_symbol
= symbol
;
17141 exp
->X_add_number
= 0;
17147 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
17148 (enum bfd_reloc_code_real
) reloc
);
17152 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
17153 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
17157 /* Mark whether the fix is to a THUMB instruction, or an ARM
17159 new_fix
->tc_fix_data
= thumb_mode
;
17162 /* Create a frg for an instruction requiring relaxation. */
17164 output_relax_insn (void)
17170 /* The size of the instruction is unknown, so tie the debug info to the
17171 start of the instruction. */
17172 dwarf2_emit_insn (0);
17174 switch (inst
.reloc
.exp
.X_op
)
17177 sym
= inst
.reloc
.exp
.X_add_symbol
;
17178 offset
= inst
.reloc
.exp
.X_add_number
;
17182 offset
= inst
.reloc
.exp
.X_add_number
;
17185 sym
= make_expr_symbol (&inst
.reloc
.exp
);
17189 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
17190 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
17191 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
17194 /* Write a 32-bit thumb instruction to buf. */
17196 put_thumb32_insn (char * buf
, unsigned long insn
)
17198 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
17199 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
17203 output_inst (const char * str
)
17209 as_bad ("%s -- `%s'", inst
.error
, str
);
17214 output_relax_insn ();
17217 if (inst
.size
== 0)
17220 to
= frag_more (inst
.size
);
17221 /* PR 9814: Record the thumb mode into the current frag so that we know
17222 what type of NOP padding to use, if necessary. We override any previous
17223 setting so that if the mode has changed then the NOPS that we use will
17224 match the encoding of the last instruction in the frag. */
17225 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
17227 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
17229 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
17230 put_thumb32_insn (to
, inst
.instruction
);
17232 else if (inst
.size
> INSN_SIZE
)
17234 gas_assert (inst
.size
== (2 * INSN_SIZE
));
17235 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
17236 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
17239 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
17241 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
17242 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
17243 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
17246 dwarf2_emit_insn (inst
.size
);
17250 output_it_inst (int cond
, int mask
, char * to
)
17252 unsigned long instruction
= 0xbf00;
17255 instruction
|= mask
;
17256 instruction
|= cond
<< 4;
17260 to
= frag_more (2);
17262 dwarf2_emit_insn (2);
17266 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17305 /* Subroutine of md_assemble, responsible for looking up the primary
17306 opcode from the mnemonic the user wrote. STR points to the
17307 beginning of the mnemonic.
17309 This is not simply a hash table lookup, because of conditional
17310 variants. Most instructions have conditional variants, which are
17311 expressed with a _conditional affix_ to the mnemonic. If we were
17312 to encode each conditional variant as a literal string in the opcode
17313 table, it would have approximately 20,000 entries.
17315 Most mnemonics take this affix as a suffix, and in unified syntax,
17316 'most' is upgraded to 'all'. However, in the divided syntax, some
17317 instructions take the affix as an infix, notably the s-variants of
17318 the arithmetic instructions. Of those instructions, all but six
17319 have the infix appear after the third character of the mnemonic.
17321 Accordingly, the algorithm for looking up primary opcodes given
17324 1. Look up the identifier in the opcode table.
17325 If we find a match, go to step U.
17327 2. Look up the last two characters of the identifier in the
17328 conditions table. If we find a match, look up the first N-2
17329 characters of the identifier in the opcode table. If we
17330 find a match, go to step CE.
17332 3. Look up the fourth and fifth characters of the identifier in
17333 the conditions table. If we find a match, extract those
17334 characters from the identifier, and look up the remaining
17335 characters in the opcode table. If we find a match, go
17340 U. Examine the tag field of the opcode structure, in case this is
17341 one of the six instructions with its conditional infix in an
17342 unusual place. If it is, the tag tells us where to find the
17343 infix; look it up in the conditions table and set inst.cond
17344 accordingly. Otherwise, this is an unconditional instruction.
17345 Again set inst.cond accordingly. Return the opcode structure.
17347 CE. Examine the tag field to make sure this is an instruction that
17348 should receive a conditional suffix. If it is not, fail.
17349 Otherwise, set inst.cond from the suffix we already looked up,
17350 and return the opcode structure.
17352 CM. Examine the tag field to make sure this is an instruction that
17353 should receive a conditional infix after the third character.
17354 If it is not, fail. Otherwise, undo the edits to the current
17355 line of input and proceed as for case CE. */
17357 static const struct asm_opcode
*
17358 opcode_lookup (char **str
)
17362 const struct asm_opcode
*opcode
;
17363 const struct asm_cond
*cond
;
17366 /* Scan up to the end of the mnemonic, which must end in white space,
17367 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17368 for (base
= end
= *str
; *end
!= '\0'; end
++)
17369 if (*end
== ' ' || *end
== '.')
17375 /* Handle a possible width suffix and/or Neon type suffix. */
17380 /* The .w and .n suffixes are only valid if the unified syntax is in
17382 if (unified_syntax
&& end
[1] == 'w')
17384 else if (unified_syntax
&& end
[1] == 'n')
17389 inst
.vectype
.elems
= 0;
17391 *str
= end
+ offset
;
17393 if (end
[offset
] == '.')
17395 /* See if we have a Neon type suffix (possible in either unified or
17396 non-unified ARM syntax mode). */
17397 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
17400 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
17406 /* Look for unaffixed or special-case affixed mnemonic. */
17407 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17412 if (opcode
->tag
< OT_odd_infix_0
)
17414 inst
.cond
= COND_ALWAYS
;
17418 if (warn_on_deprecated
&& unified_syntax
)
17419 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17420 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
17421 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17424 inst
.cond
= cond
->value
;
17428 /* Cannot have a conditional suffix on a mnemonic of less than two
17430 if (end
- base
< 3)
17433 /* Look for suffixed mnemonic. */
17435 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17436 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17438 if (opcode
&& cond
)
17441 switch (opcode
->tag
)
17443 case OT_cinfix3_legacy
:
17444 /* Ignore conditional suffixes matched on infix only mnemonics. */
17448 case OT_cinfix3_deprecated
:
17449 case OT_odd_infix_unc
:
17450 if (!unified_syntax
)
17452 /* else fall through */
17456 case OT_csuf_or_in3
:
17457 inst
.cond
= cond
->value
;
17460 case OT_unconditional
:
17461 case OT_unconditionalF
:
17463 inst
.cond
= cond
->value
;
17466 /* Delayed diagnostic. */
17467 inst
.error
= BAD_COND
;
17468 inst
.cond
= COND_ALWAYS
;
17477 /* Cannot have a usual-position infix on a mnemonic of less than
17478 six characters (five would be a suffix). */
17479 if (end
- base
< 6)
17482 /* Look for infixed mnemonic in the usual position. */
17484 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17488 memcpy (save
, affix
, 2);
17489 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
17490 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17492 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
17493 memcpy (affix
, save
, 2);
17496 && (opcode
->tag
== OT_cinfix3
17497 || opcode
->tag
== OT_cinfix3_deprecated
17498 || opcode
->tag
== OT_csuf_or_in3
17499 || opcode
->tag
== OT_cinfix3_legacy
))
17502 if (warn_on_deprecated
&& unified_syntax
17503 && (opcode
->tag
== OT_cinfix3
17504 || opcode
->tag
== OT_cinfix3_deprecated
))
17505 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17507 inst
.cond
= cond
->value
;
17514 /* This function generates an initial IT instruction, leaving its block
17515 virtually open for the new instructions. Eventually,
17516 the mask will be updated by now_it_add_mask () each time
17517 a new instruction needs to be included in the IT block.
17518 Finally, the block is closed with close_automatic_it_block ().
17519 The block closure can be requested either from md_assemble (),
17520 a tencode (), or due to a label hook. */
17523 new_automatic_it_block (int cond
)
17525 now_it
.state
= AUTOMATIC_IT_BLOCK
;
17526 now_it
.mask
= 0x18;
17528 now_it
.block_length
= 1;
17529 mapping_state (MAP_THUMB
);
17530 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
17531 now_it
.warn_deprecated
= FALSE
;
17532 now_it
.insn_cond
= TRUE
;
17535 /* Close an automatic IT block.
17536 See comments in new_automatic_it_block (). */
17539 close_automatic_it_block (void)
17541 now_it
.mask
= 0x10;
17542 now_it
.block_length
= 0;
17545 /* Update the mask of the current automatically-generated IT
17546 instruction. See comments in new_automatic_it_block (). */
17549 now_it_add_mask (int cond
)
17551 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17552 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17553 | ((bitvalue) << (nbit)))
17554 const int resulting_bit
= (cond
& 1);
17556 now_it
.mask
&= 0xf;
17557 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17559 (5 - now_it
.block_length
));
17560 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17562 ((5 - now_it
.block_length
) - 1) );
17563 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
17566 #undef SET_BIT_VALUE
17569 /* The IT blocks handling machinery is accessed through the these functions:
17570 it_fsm_pre_encode () from md_assemble ()
17571 set_it_insn_type () optional, from the tencode functions
17572 set_it_insn_type_last () ditto
17573 in_it_block () ditto
17574 it_fsm_post_encode () from md_assemble ()
17575 force_automatic_it_block_close () from label habdling functions
17578 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17579 initializing the IT insn type with a generic initial value depending
17580 on the inst.condition.
17581 2) During the tencode function, two things may happen:
17582 a) The tencode function overrides the IT insn type by
17583 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17584 b) The tencode function queries the IT block state by
17585 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17587 Both set_it_insn_type and in_it_block run the internal FSM state
17588 handling function (handle_it_state), because: a) setting the IT insn
17589 type may incur in an invalid state (exiting the function),
17590 and b) querying the state requires the FSM to be updated.
17591 Specifically we want to avoid creating an IT block for conditional
17592 branches, so it_fsm_pre_encode is actually a guess and we can't
17593 determine whether an IT block is required until the tencode () routine
17594 has decided what type of instruction this actually it.
17595 Because of this, if set_it_insn_type and in_it_block have to be used,
17596 set_it_insn_type has to be called first.
17598 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17599 determines the insn IT type depending on the inst.cond code.
17600 When a tencode () routine encodes an instruction that can be
17601 either outside an IT block, or, in the case of being inside, has to be
17602 the last one, set_it_insn_type_last () will determine the proper
17603 IT instruction type based on the inst.cond code. Otherwise,
17604 set_it_insn_type can be called for overriding that logic or
17605 for covering other cases.
17607 Calling handle_it_state () may not transition the IT block state to
17608 OUTSIDE_IT_BLOCK immediatelly, since the (current) state could be
17609 still queried. Instead, if the FSM determines that the state should
17610 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17611 after the tencode () function: that's what it_fsm_post_encode () does.
17613 Since in_it_block () calls the state handling function to get an
17614 updated state, an error may occur (due to invalid insns combination).
17615 In that case, inst.error is set.
17616 Therefore, inst.error has to be checked after the execution of
17617 the tencode () routine.
17619 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17620 any pending state change (if any) that didn't take place in
17621 handle_it_state () as explained above. */
17624 it_fsm_pre_encode (void)
17626 if (inst
.cond
!= COND_ALWAYS
)
17627 inst
.it_insn_type
= INSIDE_IT_INSN
;
17629 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
17631 now_it
.state_handled
= 0;
17634 /* IT state FSM handling function. */
17637 handle_it_state (void)
17639 now_it
.state_handled
= 1;
17640 now_it
.insn_cond
= FALSE
;
17642 switch (now_it
.state
)
17644 case OUTSIDE_IT_BLOCK
:
17645 switch (inst
.it_insn_type
)
17647 case OUTSIDE_IT_INSN
:
17650 case INSIDE_IT_INSN
:
17651 case INSIDE_IT_LAST_INSN
:
17652 if (thumb_mode
== 0)
17655 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
17656 as_tsktsk (_("Warning: conditional outside an IT block"\
17661 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
17662 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
17664 /* Automatically generate the IT instruction. */
17665 new_automatic_it_block (inst
.cond
);
17666 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
17667 close_automatic_it_block ();
17671 inst
.error
= BAD_OUT_IT
;
17677 case IF_INSIDE_IT_LAST_INSN
:
17678 case NEUTRAL_IT_INSN
:
17682 now_it
.state
= MANUAL_IT_BLOCK
;
17683 now_it
.block_length
= 0;
17688 case AUTOMATIC_IT_BLOCK
:
17689 /* Three things may happen now:
17690 a) We should increment current it block size;
17691 b) We should close current it block (closing insn or 4 insns);
17692 c) We should close current it block and start a new one (due
17693 to incompatible conditions or
17694 4 insns-length block reached). */
17696 switch (inst
.it_insn_type
)
17698 case OUTSIDE_IT_INSN
:
17699 /* The closure of the block shall happen immediatelly,
17700 so any in_it_block () call reports the block as closed. */
17701 force_automatic_it_block_close ();
17704 case INSIDE_IT_INSN
:
17705 case INSIDE_IT_LAST_INSN
:
17706 case IF_INSIDE_IT_LAST_INSN
:
17707 now_it
.block_length
++;
17709 if (now_it
.block_length
> 4
17710 || !now_it_compatible (inst
.cond
))
17712 force_automatic_it_block_close ();
17713 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
17714 new_automatic_it_block (inst
.cond
);
17718 now_it
.insn_cond
= TRUE
;
17719 now_it_add_mask (inst
.cond
);
17722 if (now_it
.state
== AUTOMATIC_IT_BLOCK
17723 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
17724 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
17725 close_automatic_it_block ();
17728 case NEUTRAL_IT_INSN
:
17729 now_it
.block_length
++;
17730 now_it
.insn_cond
= TRUE
;
17732 if (now_it
.block_length
> 4)
17733 force_automatic_it_block_close ();
17735 now_it_add_mask (now_it
.cc
& 1);
17739 close_automatic_it_block ();
17740 now_it
.state
= MANUAL_IT_BLOCK
;
17745 case MANUAL_IT_BLOCK
:
17747 /* Check conditional suffixes. */
17748 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
17751 now_it
.mask
&= 0x1f;
17752 is_last
= (now_it
.mask
== 0x10);
17753 now_it
.insn_cond
= TRUE
;
17755 switch (inst
.it_insn_type
)
17757 case OUTSIDE_IT_INSN
:
17758 inst
.error
= BAD_NOT_IT
;
17761 case INSIDE_IT_INSN
:
17762 if (cond
!= inst
.cond
)
17764 inst
.error
= BAD_IT_COND
;
17769 case INSIDE_IT_LAST_INSN
:
17770 case IF_INSIDE_IT_LAST_INSN
:
17771 if (cond
!= inst
.cond
)
17773 inst
.error
= BAD_IT_COND
;
17778 inst
.error
= BAD_BRANCH
;
17783 case NEUTRAL_IT_INSN
:
17784 /* The BKPT instruction is unconditional even in an IT block. */
17788 inst
.error
= BAD_IT_IT
;
17798 struct depr_insn_mask
17800 unsigned long pattern
;
17801 unsigned long mask
;
17802 const char* description
;
17805 /* List of 16-bit instruction patterns deprecated in an IT block in
17807 static const struct depr_insn_mask depr_it_insns
[] = {
17808 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17809 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17810 { 0xa000, 0xb800, N_("ADR") },
17811 { 0x4800, 0xf800, N_("Literal loads") },
17812 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17813 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17814 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
17815 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
17816 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
17821 it_fsm_post_encode (void)
17825 if (!now_it
.state_handled
)
17826 handle_it_state ();
17828 if (now_it
.insn_cond
17829 && !now_it
.warn_deprecated
17830 && warn_on_deprecated
17831 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
17833 if (inst
.instruction
>= 0x10000)
17835 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
17836 "deprecated in ARMv8"));
17837 now_it
.warn_deprecated
= TRUE
;
17841 const struct depr_insn_mask
*p
= depr_it_insns
;
17843 while (p
->mask
!= 0)
17845 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
17847 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
17848 "of the following class are deprecated in ARMv8: "
17849 "%s"), p
->description
);
17850 now_it
.warn_deprecated
= TRUE
;
17858 if (now_it
.block_length
> 1)
17860 as_tsktsk (_("IT blocks containing more than one conditional "
17861 "instruction are deprecated in ARMv8"));
17862 now_it
.warn_deprecated
= TRUE
;
17866 is_last
= (now_it
.mask
== 0x10);
17869 now_it
.state
= OUTSIDE_IT_BLOCK
;
17875 force_automatic_it_block_close (void)
17877 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
17879 close_automatic_it_block ();
17880 now_it
.state
= OUTSIDE_IT_BLOCK
;
17888 if (!now_it
.state_handled
)
17889 handle_it_state ();
17891 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
17894 /* Whether OPCODE only has T32 encoding. Since this function is only used by
17895 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
17896 here, hence the "known" in the function name. */
17899 known_t32_only_insn (const struct asm_opcode
*opcode
)
17901 /* Original Thumb-1 wide instruction. */
17902 if (opcode
->tencode
== do_t_blx
17903 || opcode
->tencode
== do_t_branch23
17904 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
17905 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
17908 /* Wide-only instruction added to ARMv8-M. */
17909 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m
)
17910 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
17911 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
17912 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
17918 /* Whether wide instruction variant can be used if available for a valid OPCODE
17922 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
17924 if (known_t32_only_insn (opcode
))
17927 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
17928 of variant T3 of B.W is checked in do_t_branch. */
17929 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
17930 && opcode
->tencode
== do_t_branch
)
17933 /* Wide instruction variants of all instructions with narrow *and* wide
17934 variants become available with ARMv6t2. Other opcodes are either
17935 narrow-only or wide-only and are thus available if OPCODE is valid. */
17936 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
17939 /* OPCODE with narrow only instruction variant or wide variant not
17945 md_assemble (char *str
)
17948 const struct asm_opcode
* opcode
;
17950 /* Align the previous label if needed. */
17951 if (last_label_seen
!= NULL
)
17953 symbol_set_frag (last_label_seen
, frag_now
);
17954 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
17955 S_SET_SEGMENT (last_label_seen
, now_seg
);
17958 memset (&inst
, '\0', sizeof (inst
));
17959 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
17961 opcode
= opcode_lookup (&p
);
17964 /* It wasn't an instruction, but it might be a register alias of
17965 the form alias .req reg, or a Neon .dn/.qn directive. */
17966 if (! create_register_alias (str
, p
)
17967 && ! create_neon_reg_alias (str
, p
))
17968 as_bad (_("bad instruction `%s'"), str
);
17973 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
17974 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
17976 /* The value which unconditional instructions should have in place of the
17977 condition field. */
17978 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
17982 arm_feature_set variant
;
17984 variant
= cpu_variant
;
17985 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
17986 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
17987 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
17988 /* Check that this instruction is supported for this CPU. */
17989 if (!opcode
->tvariant
17990 || (thumb_mode
== 1
17991 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
17993 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
17996 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
17997 && opcode
->tencode
!= do_t_branch
)
17999 as_bad (_("Thumb does not support conditional execution"));
18003 /* Two things are addressed here:
18004 1) Implicit require narrow instructions on Thumb-1.
18005 This avoids relaxation accidentally introducing Thumb-2
18007 2) Reject wide instructions in non Thumb-2 cores.
18009 Only instructions with narrow and wide variants need to be handled
18010 but selecting all non wide-only instructions is easier. */
18011 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
18012 && !t32_insn_ok (variant
, opcode
))
18014 if (inst
.size_req
== 0)
18016 else if (inst
.size_req
== 4)
18018 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
18019 as_bad (_("selected processor does not support 32bit wide "
18020 "variant of instruction `%s'"), str
);
18022 as_bad (_("selected processor does not support `%s' in "
18023 "Thumb-2 mode"), str
);
18028 inst
.instruction
= opcode
->tvalue
;
18030 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
18032 /* Prepare the it_insn_type for those encodings that don't set
18034 it_fsm_pre_encode ();
18036 opcode
->tencode ();
18038 it_fsm_post_encode ();
18041 if (!(inst
.error
|| inst
.relax
))
18043 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
18044 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
18045 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
18047 as_bad (_("cannot honor width suffix -- `%s'"), str
);
18052 /* Something has gone badly wrong if we try to relax a fixed size
18054 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
18056 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18057 *opcode
->tvariant
);
18058 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
18059 set those bits when Thumb-2 32-bit instructions are seen. The impact
18060 of relaxable instructions will be considered later after we finish all
18062 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
18063 variant
= arm_arch_none
;
18065 variant
= cpu_variant
;
18066 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
18067 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18070 check_neon_suffixes
;
18074 mapping_state (MAP_THUMB
);
18077 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
18081 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
18082 is_bx
= (opcode
->aencode
== do_bx
);
18084 /* Check that this instruction is supported for this CPU. */
18085 if (!(is_bx
&& fix_v4bx
)
18086 && !(opcode
->avariant
&&
18087 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
18089 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
18094 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
18098 inst
.instruction
= opcode
->avalue
;
18099 if (opcode
->tag
== OT_unconditionalF
)
18100 inst
.instruction
|= 0xFU
<< 28;
18102 inst
.instruction
|= inst
.cond
<< 28;
18103 inst
.size
= INSN_SIZE
;
18104 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
18106 it_fsm_pre_encode ();
18107 opcode
->aencode ();
18108 it_fsm_post_encode ();
18110 /* Arm mode bx is marked as both v4T and v5 because it's still required
18111 on a hypothetical non-thumb v5 core. */
18113 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
18115 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
18116 *opcode
->avariant
);
18118 check_neon_suffixes
;
18122 mapping_state (MAP_ARM
);
18127 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18135 check_it_blocks_finished (void)
18140 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
18141 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
18142 == MANUAL_IT_BLOCK
)
18144 as_warn (_("section '%s' finished with an open IT block."),
18148 if (now_it
.state
== MANUAL_IT_BLOCK
)
18149 as_warn (_("file finished with an open IT block."));
18153 /* Various frobbings of labels and their addresses. */
18156 arm_start_line_hook (void)
18158 last_label_seen
= NULL
;
18162 arm_frob_label (symbolS
* sym
)
18164 last_label_seen
= sym
;
18166 ARM_SET_THUMB (sym
, thumb_mode
);
18168 #if defined OBJ_COFF || defined OBJ_ELF
18169 ARM_SET_INTERWORK (sym
, support_interwork
);
18172 force_automatic_it_block_close ();
18174 /* Note - do not allow local symbols (.Lxxx) to be labelled
18175 as Thumb functions. This is because these labels, whilst
18176 they exist inside Thumb code, are not the entry points for
18177 possible ARM->Thumb calls. Also, these labels can be used
18178 as part of a computed goto or switch statement. eg gcc
18179 can generate code that looks like this:
18181 ldr r2, [pc, .Laaa]
18191 The first instruction loads the address of the jump table.
18192 The second instruction converts a table index into a byte offset.
18193 The third instruction gets the jump address out of the table.
18194 The fourth instruction performs the jump.
18196 If the address stored at .Laaa is that of a symbol which has the
18197 Thumb_Func bit set, then the linker will arrange for this address
18198 to have the bottom bit set, which in turn would mean that the
18199 address computation performed by the third instruction would end
18200 up with the bottom bit set. Since the ARM is capable of unaligned
18201 word loads, the instruction would then load the incorrect address
18202 out of the jump table, and chaos would ensue. */
18203 if (label_is_thumb_function_name
18204 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
18205 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
18207 /* When the address of a Thumb function is taken the bottom
18208 bit of that address should be set. This will allow
18209 interworking between Arm and Thumb functions to work
18212 THUMB_SET_FUNC (sym
, 1);
18214 label_is_thumb_function_name
= FALSE
;
18217 dwarf2_emit_label (sym
);
18221 arm_data_in_code (void)
18223 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
18225 *input_line_pointer
= '/';
18226 input_line_pointer
+= 5;
18227 *input_line_pointer
= 0;
18235 arm_canonicalize_symbol_name (char * name
)
18239 if (thumb_mode
&& (len
= strlen (name
)) > 5
18240 && streq (name
+ len
- 5, "/data"))
18241 *(name
+ len
- 5) = 0;
18246 /* Table of all register names defined by default. The user can
18247 define additional names with .req. Note that all register names
18248 should appear in both upper and lowercase variants. Some registers
18249 also have mixed-case names. */
18251 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18252 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
18253 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18254 #define REGSET(p,t) \
18255 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18256 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18257 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18258 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18259 #define REGSETH(p,t) \
18260 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18261 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18262 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18263 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18264 #define REGSET2(p,t) \
18265 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18266 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18267 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18268 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18269 #define SPLRBANK(base,bank,t) \
18270 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18271 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18272 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18273 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18274 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18275 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18277 static const struct reg_entry reg_names
[] =
18279 /* ARM integer registers. */
18280 REGSET(r
, RN
), REGSET(R
, RN
),
18282 /* ATPCS synonyms. */
18283 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
18284 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
18285 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
18287 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
18288 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
18289 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
18291 /* Well-known aliases. */
18292 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
18293 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
18295 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
18296 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
18298 /* Coprocessor numbers. */
18299 REGSET(p
, CP
), REGSET(P
, CP
),
18301 /* Coprocessor register numbers. The "cr" variants are for backward
18303 REGSET(c
, CN
), REGSET(C
, CN
),
18304 REGSET(cr
, CN
), REGSET(CR
, CN
),
18306 /* ARM banked registers. */
18307 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
18308 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
18309 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
18310 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
18311 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
18312 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
18313 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
18315 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
18316 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
18317 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
18318 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
18319 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
18320 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
18321 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
18322 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
18324 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
18325 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
18326 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
18327 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
18328 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
18329 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
18330 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
18331 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18332 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18334 /* FPA registers. */
18335 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
18336 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
18338 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
18339 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
18341 /* VFP SP registers. */
18342 REGSET(s
,VFS
), REGSET(S
,VFS
),
18343 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
18345 /* VFP DP Registers. */
18346 REGSET(d
,VFD
), REGSET(D
,VFD
),
18347 /* Extra Neon DP registers. */
18348 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
18350 /* Neon QP registers. */
18351 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
18353 /* VFP control registers. */
18354 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
18355 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
18356 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
18357 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
18358 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
18359 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
18361 /* Maverick DSP coprocessor registers. */
18362 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
18363 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
18365 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
18366 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
18367 REGDEF(dspsc
,0,DSPSC
),
18369 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
18370 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
18371 REGDEF(DSPSC
,0,DSPSC
),
18373 /* iWMMXt data registers - p0, c0-15. */
18374 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
18376 /* iWMMXt control registers - p1, c0-3. */
18377 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
18378 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
18379 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
18380 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
18382 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18383 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
18384 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
18385 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
18386 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
18388 /* XScale accumulator registers. */
18389 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
18395 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18396 within psr_required_here. */
18397 static const struct asm_psr psrs
[] =
18399 /* Backward compatibility notation. Note that "all" is no longer
18400 truly all possible PSR bits. */
18401 {"all", PSR_c
| PSR_f
},
18405 /* Individual flags. */
18411 /* Combinations of flags. */
18412 {"fs", PSR_f
| PSR_s
},
18413 {"fx", PSR_f
| PSR_x
},
18414 {"fc", PSR_f
| PSR_c
},
18415 {"sf", PSR_s
| PSR_f
},
18416 {"sx", PSR_s
| PSR_x
},
18417 {"sc", PSR_s
| PSR_c
},
18418 {"xf", PSR_x
| PSR_f
},
18419 {"xs", PSR_x
| PSR_s
},
18420 {"xc", PSR_x
| PSR_c
},
18421 {"cf", PSR_c
| PSR_f
},
18422 {"cs", PSR_c
| PSR_s
},
18423 {"cx", PSR_c
| PSR_x
},
18424 {"fsx", PSR_f
| PSR_s
| PSR_x
},
18425 {"fsc", PSR_f
| PSR_s
| PSR_c
},
18426 {"fxs", PSR_f
| PSR_x
| PSR_s
},
18427 {"fxc", PSR_f
| PSR_x
| PSR_c
},
18428 {"fcs", PSR_f
| PSR_c
| PSR_s
},
18429 {"fcx", PSR_f
| PSR_c
| PSR_x
},
18430 {"sfx", PSR_s
| PSR_f
| PSR_x
},
18431 {"sfc", PSR_s
| PSR_f
| PSR_c
},
18432 {"sxf", PSR_s
| PSR_x
| PSR_f
},
18433 {"sxc", PSR_s
| PSR_x
| PSR_c
},
18434 {"scf", PSR_s
| PSR_c
| PSR_f
},
18435 {"scx", PSR_s
| PSR_c
| PSR_x
},
18436 {"xfs", PSR_x
| PSR_f
| PSR_s
},
18437 {"xfc", PSR_x
| PSR_f
| PSR_c
},
18438 {"xsf", PSR_x
| PSR_s
| PSR_f
},
18439 {"xsc", PSR_x
| PSR_s
| PSR_c
},
18440 {"xcf", PSR_x
| PSR_c
| PSR_f
},
18441 {"xcs", PSR_x
| PSR_c
| PSR_s
},
18442 {"cfs", PSR_c
| PSR_f
| PSR_s
},
18443 {"cfx", PSR_c
| PSR_f
| PSR_x
},
18444 {"csf", PSR_c
| PSR_s
| PSR_f
},
18445 {"csx", PSR_c
| PSR_s
| PSR_x
},
18446 {"cxf", PSR_c
| PSR_x
| PSR_f
},
18447 {"cxs", PSR_c
| PSR_x
| PSR_s
},
18448 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
18449 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
18450 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
18451 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
18452 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
18453 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
18454 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
18455 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
18456 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
18457 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
18458 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
18459 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
18460 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
18461 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
18462 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
18463 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
18464 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
18465 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
18466 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
18467 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
18468 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
18469 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
18470 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
18471 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
18474 /* Table of V7M psr names. */
18475 static const struct asm_psr v7m_psrs
[] =
18477 {"apsr", 0 }, {"APSR", 0 },
18478 {"iapsr", 1 }, {"IAPSR", 1 },
18479 {"eapsr", 2 }, {"EAPSR", 2 },
18480 {"psr", 3 }, {"PSR", 3 },
18481 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
18482 {"ipsr", 5 }, {"IPSR", 5 },
18483 {"epsr", 6 }, {"EPSR", 6 },
18484 {"iepsr", 7 }, {"IEPSR", 7 },
18485 {"msp", 8 }, {"MSP", 8 },
18486 {"psp", 9 }, {"PSP", 9 },
18487 {"primask", 16}, {"PRIMASK", 16},
18488 {"basepri", 17}, {"BASEPRI", 17},
18489 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
18490 {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
18491 {"faultmask", 19}, {"FAULTMASK", 19},
18492 {"control", 20}, {"CONTROL", 20}
18495 /* Table of all shift-in-operand names. */
18496 static const struct asm_shift_name shift_names
[] =
18498 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
18499 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
18500 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
18501 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
18502 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
18503 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
18506 /* Table of all explicit relocation names. */
18508 static struct reloc_entry reloc_names
[] =
18510 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
18511 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
18512 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
18513 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
18514 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
18515 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
18516 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
18517 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
18518 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
18519 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
18520 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
18521 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
18522 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
18523 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
18524 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
18525 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
18526 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
18527 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
}
18531 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
18532 static const struct asm_cond conds
[] =
18536 {"cs", 0x2}, {"hs", 0x2},
18537 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
18551 #define UL_BARRIER(L,U,CODE,FEAT) \
18552 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18553 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
18555 static struct asm_barrier_opt barrier_opt_names
[] =
18557 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
18558 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
18559 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
18560 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
18561 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
18562 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
18563 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
18564 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
18565 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
18566 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
18567 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
18568 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
18569 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
18570 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
18571 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
18572 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.	    */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* One table entry for mnemonic m1 with condition m2 infixed before m3.
   An empty m2 (sizeof (#m2) == 1, i.e. just the NUL) marks the
   unconditional spelling.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to the bare mnemonic plus one entry per condition code, for
   mnemonics whose conditional infix sits at an odd position.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18754 static const struct asm_opcode insns
[] =
18756 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18757 #define THUMB_VARIANT & arm_ext_v4t
18758 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18759 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18760 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18761 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18762 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18763 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18764 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18765 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18766 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18767 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18768 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18769 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18770 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18771 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18772 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18773 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18775 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18776 for setting PSR flag bits. They are obsolete in V6 and do not
18777 have Thumb equivalents. */
18778 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18779 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18780 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
18781 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18782 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18783 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
18784 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18785 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18786 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
18788 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
18789 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
18790 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18791 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18793 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
18794 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18795 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
18797 OP_ADDRGLDR
),ldst
, t_ldst
),
18798 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18800 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18801 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18802 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18803 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18804 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18805 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18807 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18808 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18809 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
18810 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
18813 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
18814 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
18815 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
18816 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
18818 /* Thumb-compatibility pseudo ops. */
18819 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18820 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18821 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18822 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18823 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18824 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18825 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18826 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18827 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
18828 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
18829 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
18830 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
18832 /* These may simplify to neg. */
18833 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
18834 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
18836 #undef THUMB_VARIANT
18837 #define THUMB_VARIANT & arm_ext_v6
18839 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
18841 /* V1 instructions with no Thumb analogue prior to V6T2. */
18842 #undef THUMB_VARIANT
18843 #define THUMB_VARIANT & arm_ext_v6t2
18845 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18846 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18847 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
18849 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18850 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18851 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
18852 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18854 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18855 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18857 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18858 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18860 /* V1 instructions with no Thumb analogue at all. */
18861 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
18862 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
18864 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
18865 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
18866 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
18867 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
18868 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
18869 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
18870 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
18871 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
18874 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18875 #undef THUMB_VARIANT
18876 #define THUMB_VARIANT & arm_ext_v4t
18878 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
18879 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
18881 #undef THUMB_VARIANT
18882 #define THUMB_VARIANT & arm_ext_v6t2
18884 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
18885 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
18887 /* Generic coprocessor instructions. */
18888 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
18889 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18890 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18891 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18892 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18893 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18894 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18897 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18899 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
18900 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
18903 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18904 #undef THUMB_VARIANT
18905 #define THUMB_VARIANT & arm_ext_msr
18907 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
18908 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
18911 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18912 #undef THUMB_VARIANT
18913 #define THUMB_VARIANT & arm_ext_v6t2
18915 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18916 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18917 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18918 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18919 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18920 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18921 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18922 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18925 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18926 #undef THUMB_VARIANT
18927 #define THUMB_VARIANT & arm_ext_v4t
18929 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18930 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18931 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18932 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18933 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18934 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18937 #define ARM_VARIANT & arm_ext_v4t_5
18939 /* ARM Architecture 4T. */
18940 /* Note: bx (and blx) are required on V5, even if the processor does
18941 not support Thumb. */
18942 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
18945 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18946 #undef THUMB_VARIANT
18947 #define THUMB_VARIANT & arm_ext_v5t
18949 /* Note: blx has 2 variants; the .value coded here is for
18950 BLX(2). Only this variant has conditional execution. */
18951 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
18952 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
18954 #undef THUMB_VARIANT
18955 #define THUMB_VARIANT & arm_ext_v6t2
18957 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
18958 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18959 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18960 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18961 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18962 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
18963 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18964 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18967 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18968 #undef THUMB_VARIANT
18969 #define THUMB_VARIANT & arm_ext_v5exp
18971 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18972 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18973 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18974 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18976 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18977 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18979 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18980 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18981 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18982 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18984 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18985 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18986 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18987 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18989 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18990 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18992 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18993 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18994 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18995 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18998 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18999 #undef THUMB_VARIANT
19000 #define THUMB_VARIANT & arm_ext_v6t2
19002 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
19003 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
19005 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
19006 ADDRGLDRS
), ldrd
, t_ldstd
),
19008 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19009 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19012 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19014 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
19017 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19018 #undef THUMB_VARIANT
19019 #define THUMB_VARIANT & arm_ext_v6
19021 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19022 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19023 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19024 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19025 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19026 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19027 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19028 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19029 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19030 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
19032 #undef THUMB_VARIANT
19033 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19035 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
19036 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19038 #undef THUMB_VARIANT
19039 #define THUMB_VARIANT & arm_ext_v6t2
19041 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19042 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19044 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
19045 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
19047 /* ARM V6 not included in V7M. */
19048 #undef THUMB_VARIANT
19049 #define THUMB_VARIANT & arm_ext_v6_notm
19050 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19051 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19052 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
19053 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
19054 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19055 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19056 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
19057 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19058 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
19059 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19060 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19061 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19062 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19063 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19064 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
19065 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
19066 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19067 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19068 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
19070 /* ARM V6 not included in V7M (eg. integer SIMD). */
19071 #undef THUMB_VARIANT
19072 #define THUMB_VARIANT & arm_ext_v6_dsp
19073 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
19074 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
19075 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19076 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19077 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19078 /* Old name for QASX. */
19079 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19080 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19081 /* Old name for QSAX. */
19082 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19083 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19084 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19085 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19086 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19087 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19088 /* Old name for SASX. */
19089 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19090 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19091 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19092 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19093 /* Old name for SHASX. */
19094 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19095 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19096 /* Old name for SHSAX. */
19097 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19098 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19099 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19100 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19101 /* Old name for SSAX. */
19102 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19103 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19104 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19105 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19106 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19107 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19108 /* Old name for UASX. */
19109 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19110 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19111 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19112 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19113 /* Old name for UHASX. */
19114 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19115 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19116 /* Old name for UHSAX. */
19117 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19118 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19119 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19120 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19121 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19122 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19123 /* Old name for UQASX. */
19124 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19125 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19126 /* Old name for UQSAX. */
19127 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19128 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19129 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19130 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19131 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19132 /* Old name for USAX. */
19133 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19134 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19135 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19136 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19137 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19138 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19139 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19140 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19141 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19142 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19143 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19144 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19145 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19146 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19147 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19148 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19149 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19150 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19151 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19152 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19153 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19154 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19155 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19156 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19157 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19158 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19159 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19160 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19161 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19162 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
19163 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
19164 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19165 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19166 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
19169 #define ARM_VARIANT & arm_ext_v6k
19170 #undef THUMB_VARIANT
19171 #define THUMB_VARIANT & arm_ext_v6k
19173 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
19174 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
19175 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
19176 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
19178 #undef THUMB_VARIANT
19179 #define THUMB_VARIANT & arm_ext_v6_notm
19180 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
19182 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
19183 RRnpcb
), strexd
, t_strexd
),
19185 #undef THUMB_VARIANT
19186 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19187 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
19189 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
19191 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19193 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19195 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
19198 #define ARM_VARIANT & arm_ext_sec
19199 #undef THUMB_VARIANT
19200 #define THUMB_VARIANT & arm_ext_sec
19202 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
19205 #define ARM_VARIANT & arm_ext_virt
19206 #undef THUMB_VARIANT
19207 #define THUMB_VARIANT & arm_ext_virt
19209 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
19210 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
19213 #define ARM_VARIANT & arm_ext_pan
19214 #undef THUMB_VARIANT
19215 #define THUMB_VARIANT & arm_ext_pan
19217 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
19220 #define ARM_VARIANT & arm_ext_v6t2
19221 #undef THUMB_VARIANT
19222 #define THUMB_VARIANT & arm_ext_v6t2
19224 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
19225 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
19226 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19227 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19229 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19230 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
19232 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19233 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19234 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19235 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19237 #undef THUMB_VARIANT
19238 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19239 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19240 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19242 /* Thumb-only instructions. */
19244 #define ARM_VARIANT NULL
19245 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
19246 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
19248 /* ARM does not really have an IT instruction, so always allow it.
19249 The opcode is copied from Thumb in order to allow warnings in
19250 -mimplicit-it=[never | arm] modes. */
19252 #define ARM_VARIANT & arm_ext_v1
19253 #undef THUMB_VARIANT
19254 #define THUMB_VARIANT & arm_ext_v6t2
19256 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
19257 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
19258 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
19259 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
19260 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
19261 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
19262 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
19263 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
19264 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
19265 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
19266 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
19267 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
19268 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
19269 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
19270 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
19271 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19272 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19273 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19275 /* Thumb2 only instructions. */
19277 #define ARM_VARIANT NULL
19279 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19280 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19281 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19282 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19283 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
19284 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
19286 /* Hardware division instructions. */
19288 #define ARM_VARIANT & arm_ext_adiv
19289 #undef THUMB_VARIANT
19290 #define THUMB_VARIANT & arm_ext_div
19292 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19293 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19295 /* ARM V6M/V7 instructions. */
19297 #define ARM_VARIANT & arm_ext_barrier
19298 #undef THUMB_VARIANT
19299 #define THUMB_VARIANT & arm_ext_barrier
19301 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
19302 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
19303 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
19305 /* ARM V7 instructions. */
19307 #define ARM_VARIANT & arm_ext_v7
19308 #undef THUMB_VARIANT
19309 #define THUMB_VARIANT & arm_ext_v7
19311 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
19312 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
19315 #define ARM_VARIANT & arm_ext_mp
19316 #undef THUMB_VARIANT
19317 #define THUMB_VARIANT & arm_ext_mp
19319 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
19321 /* AArchv8 instructions. */
19323 #define ARM_VARIANT & arm_ext_v8
19325 /* Instructions shared between armv8-a and armv8-m. */
19326 #undef THUMB_VARIANT
19327 #define THUMB_VARIANT & arm_ext_atomics
19329 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19330 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19331 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19332 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19333 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19334 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19335 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19336 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
19337 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19338 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19340 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19342 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19344 #undef THUMB_VARIANT
19345 #define THUMB_VARIANT & arm_ext_v8
19347 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
19348 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
19349 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
19351 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
19353 /* ARMv8 T32 only. */
19355 #define ARM_VARIANT NULL
19356 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
19357 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
19358 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
19360 /* FP for ARMv8. */
19362 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19363 #undef THUMB_VARIANT
19364 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19366 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19367 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19368 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19369 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19370 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19371 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19372 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
19373 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
19374 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
19375 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
19376 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
19377 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
19378 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
19379 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
19380 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
19381 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
19382 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
19384 /* Crypto v1 extensions. */
19386 #define ARM_VARIANT & fpu_crypto_ext_armv8
19387 #undef THUMB_VARIANT
19388 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19390 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
19391 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
19392 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
19393 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
19394 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
19395 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
19396 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
19397 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
19398 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
19399 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
19400 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
19401 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
19402 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
19403 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
19406 #define ARM_VARIANT & crc_ext_armv8
19407 #undef THUMB_VARIANT
19408 #define THUMB_VARIANT & crc_ext_armv8
19409 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
19410 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
19411 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
19412 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
19413 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
19414 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
19416 /* ARMv8.2 RAS extension. */
19418 #define ARM_VARIANT & arm_ext_v8_2
19419 #undef THUMB_VARIANT
19420 #define THUMB_VARIANT & arm_ext_v8_2
19421 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
19424 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19425 #undef THUMB_VARIANT
19426 #define THUMB_VARIANT NULL
19428 cCE("wfs", e200110
, 1, (RR
), rd
),
19429 cCE("rfs", e300110
, 1, (RR
), rd
),
19430 cCE("wfc", e400110
, 1, (RR
), rd
),
19431 cCE("rfc", e500110
, 1, (RR
), rd
),
19433 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19434 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19435 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19436 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19438 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19439 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19440 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19441 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19443 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
19444 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
19445 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
19446 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
19447 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
19448 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
19449 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
19450 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
19451 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
19452 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
19453 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
19454 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
19456 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
19457 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
19458 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
19459 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
19460 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
19461 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
19462 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
19463 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
19464 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
19465 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
19466 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
19467 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
19469 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
19470 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
19471 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
19472 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
19473 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
19474 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
19475 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
19476 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
19477 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
19478 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
19479 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
19480 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
19482 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
19483 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
19484 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
19485 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
19486 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
19487 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
19488 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
19489 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
19490 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
19491 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
19492 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
19493 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
19495 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
19496 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
19497 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
19498 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
19499 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
19500 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
19501 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
19502 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
19503 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
19504 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
19505 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
19506 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
19508 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
19509 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
19510 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
19511 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
19512 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
19513 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
19514 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
19515 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
19516 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
19517 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
19518 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
19519 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
19521 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
19522 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
19523 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
19524 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
19525 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
19526 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
19527 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
19528 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
19529 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
19530 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
19531 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
19532 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
19534 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
19535 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
19536 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
19537 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
19538 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
19539 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
19540 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
19541 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
19542 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
19543 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
19544 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
19545 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
19547 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
19548 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
19549 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
19550 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
19551 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
19552 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
19553 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
19554 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
19555 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
19556 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
19557 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
19558 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
19560 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
19561 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
19562 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
19563 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
19564 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
19565 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
19566 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
19567 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
19568 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
19569 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
19570 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
19571 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
19573 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
19574 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
19575 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
19576 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
19577 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
19578 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
19579 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
19580 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
19581 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
19582 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
19583 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
19584 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
19586 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
19587 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
19588 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
19589 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
19590 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
19591 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
19592 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
19593 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
19594 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
19595 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
19596 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
19597 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
19599 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
19600 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
19601 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
19602 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
19603 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
19604 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
19605 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
19606 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
19607 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
19608 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
19609 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
19610 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
19612 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
19613 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
19614 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
19615 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
19616 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
19617 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
19618 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
19619 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
19620 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
19621 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
19622 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
19623 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
19625 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
19626 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
19627 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
19628 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
19629 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
19630 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
19631 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
19632 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
19633 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
19634 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
19635 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
19636 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
19638 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
19639 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
19640 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
19641 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
19642 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
19643 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
19644 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
19645 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
19646 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
19647 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
19648 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
19649 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
19651 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19652 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19653 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19654 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19655 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19656 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19657 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19658 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19659 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19660 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19661 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19662 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19664 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19665 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19666 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19667 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19668 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19669 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19670 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19671 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19672 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19673 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19674 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19675 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19677 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19678 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19679 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19680 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19681 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19682 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19683 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19684 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19685 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19686 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19687 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19688 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19690 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19691 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19692 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19693 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19694 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19695 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19696 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19697 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19698 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19699 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19700 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19701 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19703 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19704 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19705 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19706 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19707 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19708 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19709 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19710 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19711 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19712 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19713 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19714 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19716 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19717 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19718 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19719 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19720 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19721 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19722 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19723 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19724 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19725 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19726 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19727 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19729 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19730 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19731 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19732 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19733 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19734 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19735 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19736 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19737 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19738 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19739 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19740 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19742 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19743 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19744 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19745 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19746 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19747 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19748 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19749 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19750 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19751 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19752 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19753 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19755 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19756 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19757 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19758 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19759 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19760 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19761 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19762 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19763 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19764 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19765 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19766 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19768 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19769 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19770 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19771 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19772 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19773 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19774 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19775 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19776 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19777 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19778 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19779 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19781 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19782 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19783 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19784 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19785 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19786 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19787 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19788 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19789 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19790 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19791 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19792 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19794 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19795 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19796 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19797 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19798 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19799 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19800 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19801 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19802 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19803 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19804 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19805 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19807 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19808 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19809 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19810 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19811 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19812 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19813 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19814 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19815 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19816 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19817 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19818 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19820 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19821 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19822 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19823 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19825 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
19826 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
19827 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
19828 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
19829 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
19830 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
19831 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
19832 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
19833 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
19834 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
19835 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
19836 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
19838 /* The implementation of the FIX instruction is broken on some
19839 assemblers, in that it accepts a precision specifier as well as a
19840 rounding specifier, despite the fact that this is meaningless.
19841 To be more compatible, we accept it as well, though of course it
19842 does not set any bits. */
19843 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
19844 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
19845 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
19846 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
19847 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
19848 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
19849 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
19850 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
19851 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
19852 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
19853 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
19854 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
19855 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
19857 /* Instructions that were new with the real FPA, call them V2. */
19859 #define ARM_VARIANT & fpu_fpa_ext_v2
19861 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19862 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19863 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19864 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19865 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19866 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19869 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19871 /* Moves and type conversions. */
19872 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19873 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
19874 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
19875 cCE("fmstat", ef1fa10
, 0, (), noargs
),
19876 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
19877 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
19878 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19879 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19880 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19881 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19882 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19883 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19884 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
19885 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
19887 /* Memory operations. */
19888 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
19889 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
19890 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19891 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19892 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19893 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19894 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19895 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19896 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19897 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19898 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19899 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19900 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19901 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19902 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19903 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19904 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19905 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19907 /* Monadic operations. */
19908 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19909 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19910 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19912 /* Dyadic operations. */
19913 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19914 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19915 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19916 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19917 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19918 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19919 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19920 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19921 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19924 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19925 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
19926 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19927 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
19929 /* Double precision load/store are still present on single precision
19930 implementations. */
19931 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
19932 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
19933 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19934 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19935 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19936 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19937 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19938 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19939 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19940 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19943 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19945 /* Moves and type conversions. */
19946 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19947 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19948 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19949 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
19950 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
19951 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
19952 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
19953 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19954 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19955 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19956 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19957 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19958 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19960 /* Monadic operations. */
19961 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19962 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19963 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19965 /* Dyadic operations. */
19966 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19967 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19968 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19969 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19970 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19971 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19972 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19973 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19974 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19977 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19978 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
19979 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19980 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
19983 #define ARM_VARIANT & fpu_vfp_ext_v2
19985 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
19986 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
19987 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
19988 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
19990 /* Instructions which may belong to either the Neon or VFP instruction sets.
19991 Individual encoder functions perform additional architecture checks. */
19993 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19994 #undef THUMB_VARIANT
19995 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19997 /* These mnemonics are unique to VFP. */
19998 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
19999 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
20000 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20001 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20002 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20003 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20004 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20005 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
20006 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
20007 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
20009 /* Mnemonics shared by Neon and VFP. */
20010 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
20011 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20012 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20014 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20015 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20017 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20018 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20020 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20021 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20022 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20023 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20024 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20025 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20026 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20027 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20029 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
20030 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
20031 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
20032 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
20035 /* NOTE: All VMOV encoding is special-cased! */
20036 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
20037 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
20039 #undef THUMB_VARIANT
20040 #define THUMB_VARIANT & fpu_neon_ext_v1
20042 #define ARM_VARIANT & fpu_neon_ext_v1
20044 /* Data processing with three registers of the same length. */
20045 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20046 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
20047 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
20048 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20049 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20050 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20051 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20052 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20053 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20054 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20055 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20056 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20057 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20058 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20059 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20060 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20061 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20062 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20063 /* If not immediate, fall back to neon_dyadic_i64_su.
20064 shl_imm should accept I8 I16 I32 I64,
20065 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20066 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
20067 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
20068 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
20069 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
20070 /* Logic ops, types optional & ignored. */
20071 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20072 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20073 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20074 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20075 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20076 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20077 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20078 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20079 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
20080 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
20081 /* Bitfield ops, untyped. */
20082 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20083 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20084 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20085 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20086 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20087 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20088 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
20089 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20090 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20091 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20092 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20093 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20094 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20095 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20096 back to neon_dyadic_if_su. */
20097 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20098 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20099 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20100 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20101 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20102 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20103 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20104 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20105 /* Comparison. Type I8 I16 I32 F32. */
20106 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
20107 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
20108 /* As above, D registers only. */
20109 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20110 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20111 /* Int and float variants, signedness unimportant. */
20112 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20113 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20114 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
20115 /* Add/sub take types I8 I16 I32 I64 F32. */
20116 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20117 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20118 /* vtst takes sizes 8, 16, 32. */
20119 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
20120 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
20121 /* VMUL takes I8 I16 I32 F32 P8. */
20122 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
20123 /* VQD{R}MULH takes S16 S32. */
20124 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20125 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20126 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20127 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20128 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20129 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20130 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20131 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20132 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20133 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20134 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20135 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20136 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20137 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20138 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20139 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20140 /* ARM v8.1 extension. */
20141 nUF(vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20142 nUF(vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20143 nUF(vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20144 nUF(vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20146 /* Two address, int/float. Types S8 S16 S32 F32. */
20147 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20148 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20150 /* Data processing with two registers and a shift amount. */
20151 /* Right shifts, and variants with rounding.
20152 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20153 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20154 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20155 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20156 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20157 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20158 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20159 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20160 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20161 /* Shift and insert. Sizes accepted 8 16 32 64. */
20162 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
20163 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
20164 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
20165 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
20166 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20167 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
20168 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
20169 /* Right shift immediate, saturating & narrowing, with rounding variants.
20170 Types accepted S16 S32 S64 U16 U32 U64. */
20171 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20172 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20173 /* As above, unsigned. Types accepted S16 S32 S64. */
20174 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20175 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20176 /* Right shift narrowing. Types accepted I16 I32 I64. */
20177 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20178 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20179 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20180 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
20181 /* CVT with optional immediate for fixed-point variant. */
20182 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
20184 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
20185 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
20187 /* Data processing, three registers of different lengths. */
20188 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20189 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
20190 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20191 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20192 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20193 /* If not scalar, fall back to neon_dyadic_long.
20194 Vector types as above, scalar types S16 S32 U16 U32. */
20195 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20196 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20197 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20198 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20199 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20200 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20201 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20202 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20203 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20204 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20205 /* Saturating doubling multiplies. Types S16 S32. */
20206 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20207 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20208 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20209 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20210 S16 S32 U16 U32. */
20211 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
20213 /* Extract. Size 8. */
20214 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
20215 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
20217 /* Two registers, miscellaneous. */
20218 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20219 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
20220 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
20221 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
20222 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
20223 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
20224 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
20225 /* Vector replicate. Sizes 8 16 32. */
20226 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
20227 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
20228 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20229 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
20230 /* VMOVN. Types I16 I32 I64. */
20231 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
20232 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20233 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
20234 /* VQMOVUN. Types S16 S32 S64. */
20235 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
20236 /* VZIP / VUZP. Sizes 8 16 32. */
20237 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20238 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20239 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20240 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20241 /* VQABS / VQNEG. Types S8 S16 S32. */
20242 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20243 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20244 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20245 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20246 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20247 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20248 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
20249 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20250 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
20251 /* Reciprocal estimates. Types U32 F32. */
20252 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20253 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
20254 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20255 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
20256 /* VCLS. Types S8 S16 S32. */
20257 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
20258 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
20259 /* VCLZ. Types I8 I16 I32. */
20260 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
20261 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
20262 /* VCNT. Size 8. */
20263 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
20264 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
20265 /* Two address, untyped. */
20266 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
20267 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
20268 /* VTRN. Sizes 8 16 32. */
20269 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
20270 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
20272 /* Table lookup. Size 8. */
20273 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20274 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20276 #undef THUMB_VARIANT
20277 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20279 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20281 /* Neon element/structure load/store. */
20282 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20283 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20284 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20285 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20286 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20287 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20288 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20289 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20291 #undef THUMB_VARIANT
20292 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20294 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20295 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
20296 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20297 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20298 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20299 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20300 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20301 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20302 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20303 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20305 #undef THUMB_VARIANT
20306 #define THUMB_VARIANT & fpu_vfp_ext_v3
20308 #define ARM_VARIANT & fpu_vfp_ext_v3
20310 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
20311 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20312 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20313 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20314 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20315 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20316 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20317 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20318 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20321 #define ARM_VARIANT & fpu_vfp_ext_fma
20322 #undef THUMB_VARIANT
20323 #define THUMB_VARIANT & fpu_vfp_ext_fma
20324 /* Mnemonics shared by Neon and VFP. These are included in the
20325 VFP FMA variant; NEON and VFP FMA always includes the NEON
20326 FMA instructions. */
20327 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20328 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20329 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20330 the v form should always be used. */
20331 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20332 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20333 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20334 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20335 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20336 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20338 #undef THUMB_VARIANT
20340 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20342 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20343 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20344 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20345 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20346 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20347 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20348 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
20349 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
20352 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20354 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
20355 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
20356 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
20357 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
20358 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
20359 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
20360 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
20361 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
20362 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
20363 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20364 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20365 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20366 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20367 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20368 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20369 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20370 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20371 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20372 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
20373 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
20374 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20375 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20376 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20377 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20378 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20379 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20380 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
20381 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
20382 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
20383 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
20384 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
20385 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
20386 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
20387 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
20388 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20389 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20390 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20391 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20392 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20393 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20394 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20395 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20396 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20397 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20398 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20399 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20400 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
20401 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20402 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20403 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20404 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20405 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20406 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20407 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20408 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20409 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20410 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20411 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20412 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20413 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20414 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20415 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20416 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20417 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20418 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20419 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20420 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20421 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20422 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20423 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20424 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20425 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20426 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20427 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20428 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20429 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20430 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20431 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20432 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20433 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20434 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20435 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20436 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20437 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20438 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20439 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20440 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20441 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20442 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
20443 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20444 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20445 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20446 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20447 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20448 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20449 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20450 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20451 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20452 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20453 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20454 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20455 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20456 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20457 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20458 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20459 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20460 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20461 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20462 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20463 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20464 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
20465 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20466 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20467 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20468 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20469 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20470 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20471 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20472 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20473 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20474 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20475 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20476 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20477 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20478 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20479 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20480 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20481 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20482 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20483 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20484 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20485 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20486 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20487 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20488 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20489 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20490 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20491 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20492 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20493 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20494 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20495 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20496 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20497 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20498 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20499 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20500 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20501 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20502 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20503 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20504 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20505 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20506 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20507 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20508 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20509 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20510 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20511 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20512 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20513 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20514 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20515 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
20518 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20520 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
20521 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
20522 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
20523 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20524 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20525 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20526 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20527 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20528 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20529 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20530 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20531 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20532 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20533 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20534 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20535 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20536 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20537 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20538 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20539 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20540 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
20541 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20542 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20543 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20544 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20545 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20546 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20547 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20548 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20549 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20550 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20551 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20552 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20553 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20554 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20555 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20556 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20557 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20558 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20559 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20560 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20561 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20562 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20563 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20564 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20565 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20566 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20567 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20568 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20569 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20570 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20571 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20572 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20573 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20574 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20575 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20576 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20579 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20581 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20582 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20583 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20584 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20585 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20586 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20587 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20588 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20589 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
20590 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
20591 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
20592 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
20593 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
20594 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
20595 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
20596 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
20597 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
20598 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
20599 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
20600 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
20601 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
20602 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
20603 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
20604 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
20605 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
20606 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
20607 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
20608 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
20609 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
20610 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
20611 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
20612 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
20613 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
20614 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
20615 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
20616 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
20617 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
20618 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
20619 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
20620 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
20621 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
20622 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
20623 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
20624 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
20625 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
20626 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
20627 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
20628 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
20629 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
20630 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
20631 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
20632 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
20633 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
20634 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
20635 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20636 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20637 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20638 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20639 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20640 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20641 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
20642 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
20643 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
20644 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
20645 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20646 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20647 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20648 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20649 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20650 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20651 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20652 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20653 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20654 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20655 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20656 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20659 #define ARM_VARIANT NULL
20660 #undef THUMB_VARIANT
20661 #define THUMB_VARIANT & arm_ext_v8m
20662 TUE("tt", 0, e840f000
, 2, (RRnpc
, RRnpc
), 0, tt
),
20663 TUE("ttt", 0, e840f040
, 2, (RRnpc
, RRnpc
), 0, tt
),
20666 #undef THUMB_VARIANT
20692 /* MD interface: bits in the object file. */
20694 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20695 for use in the a.out file, and stores them in the array pointed to by buf.
20696 This knows about the endian-ness of the target machine and does
20697 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20698 2 (short) and 4 (long) Floating numbers are put out as a series of
20699 LITTLENUMS (shorts, here at least). */
20702 md_number_to_chars (char * buf
, valueT val
, int n
)
20704 if (target_big_endian
)
20705 number_to_chars_bigendian (buf
, val
, n
);
20707 number_to_chars_littleendian (buf
, val
, n
);
20711 md_chars_to_number (char * buf
, int n
)
20714 unsigned char * where
= (unsigned char *) buf
;
20716 if (target_big_endian
)
20721 result
|= (*where
++ & 255);
20729 result
|= (where
[n
] & 255);
20736 /* MD interface: Sections. */
20738 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20739 that an rs_machine_dependent frag may reach. */
20742 arm_frag_max_var (fragS
*fragp
)
20744 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20745 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20747 Note that we generate relaxable instructions even for cases that don't
20748 really need it, like an immediate that's a trivial constant. So we're
20749 overestimating the instruction size for some of those cases. Rather
20750 than putting more intelligence here, it would probably be better to
20751 avoid generating a relaxation frag in the first place when it can be
20752 determined up front that a short instruction will suffice. */
20754 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
20758 /* Estimate the size of a frag before relaxing. Assume everything fits in
20762 md_estimate_size_before_relax (fragS
* fragp
,
20763 segT segtype ATTRIBUTE_UNUSED
)
20769 /* Convert a machine dependent frag. */
20772 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
20774 unsigned long insn
;
20775 unsigned long old_op
;
20783 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
20785 old_op
= bfd_get_16(abfd
, buf
);
20786 if (fragp
->fr_symbol
)
20788 exp
.X_op
= O_symbol
;
20789 exp
.X_add_symbol
= fragp
->fr_symbol
;
20793 exp
.X_op
= O_constant
;
20795 exp
.X_add_number
= fragp
->fr_offset
;
20796 opcode
= fragp
->fr_subtype
;
20799 case T_MNEM_ldr_pc
:
20800 case T_MNEM_ldr_pc2
:
20801 case T_MNEM_ldr_sp
:
20802 case T_MNEM_str_sp
:
20809 if (fragp
->fr_var
== 4)
20811 insn
= THUMB_OP32 (opcode
);
20812 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
20814 insn
|= (old_op
& 0x700) << 4;
20818 insn
|= (old_op
& 7) << 12;
20819 insn
|= (old_op
& 0x38) << 13;
20821 insn
|= 0x00000c00;
20822 put_thumb32_insn (buf
, insn
);
20823 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
20827 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
20829 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
20832 if (fragp
->fr_var
== 4)
20834 insn
= THUMB_OP32 (opcode
);
20835 insn
|= (old_op
& 0xf0) << 4;
20836 put_thumb32_insn (buf
, insn
);
20837 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
20841 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20842 exp
.X_add_number
-= 4;
20850 if (fragp
->fr_var
== 4)
20852 int r0off
= (opcode
== T_MNEM_mov
20853 || opcode
== T_MNEM_movs
) ? 0 : 8;
20854 insn
= THUMB_OP32 (opcode
);
20855 insn
= (insn
& 0xe1ffffff) | 0x10000000;
20856 insn
|= (old_op
& 0x700) << r0off
;
20857 put_thumb32_insn (buf
, insn
);
20858 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
20862 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
20867 if (fragp
->fr_var
== 4)
20869 insn
= THUMB_OP32(opcode
);
20870 put_thumb32_insn (buf
, insn
);
20871 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
20874 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
20878 if (fragp
->fr_var
== 4)
20880 insn
= THUMB_OP32(opcode
);
20881 insn
|= (old_op
& 0xf00) << 14;
20882 put_thumb32_insn (buf
, insn
);
20883 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
20886 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
20889 case T_MNEM_add_sp
:
20890 case T_MNEM_add_pc
:
20891 case T_MNEM_inc_sp
:
20892 case T_MNEM_dec_sp
:
20893 if (fragp
->fr_var
== 4)
20895 /* ??? Choose between add and addw. */
20896 insn
= THUMB_OP32 (opcode
);
20897 insn
|= (old_op
& 0xf0) << 4;
20898 put_thumb32_insn (buf
, insn
);
20899 if (opcode
== T_MNEM_add_pc
)
20900 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
20902 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
20905 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20913 if (fragp
->fr_var
== 4)
20915 insn
= THUMB_OP32 (opcode
);
20916 insn
|= (old_op
& 0xf0) << 4;
20917 insn
|= (old_op
& 0xf) << 16;
20918 put_thumb32_insn (buf
, insn
);
20919 if (insn
& (1 << 20))
20920 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
20922 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
20925 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20931 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
20932 (enum bfd_reloc_code_real
) reloc_type
);
20933 fixp
->fx_file
= fragp
->fr_file
;
20934 fixp
->fx_line
= fragp
->fr_line
;
20935 fragp
->fr_fix
+= fragp
->fr_var
;
20937 /* Set whether we use thumb-2 ISA based on final relaxation results. */
20938 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
20939 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
20940 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
20943 /* Return the size of a relaxable immediate operand instruction.
20944 SHIFT and SIZE specify the form of the allowable immediate. */
20946 relax_immediate (fragS
*fragp
, int size
, int shift
)
20952 /* ??? Should be able to do better than this. */
20953 if (fragp
->fr_symbol
)
20956 low
= (1 << shift
) - 1;
20957 mask
= (1 << (shift
+ size
)) - (1 << shift
);
20958 offset
= fragp
->fr_offset
;
20959 /* Force misaligned offsets to 32-bit variant. */
20962 if (offset
& ~mask
)
20967 /* Get the address of a symbol during relaxation. */
20969 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
20975 sym
= fragp
->fr_symbol
;
20976 sym_frag
= symbol_get_frag (sym
);
20977 know (S_GET_SEGMENT (sym
) != absolute_section
20978 || sym_frag
== &zero_address_frag
);
20979 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
20981 /* If frag has yet to be reached on this pass, assume it will
20982 move by STRETCH just as we did. If this is not so, it will
20983 be because some frag between grows, and that will force
20987 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
20991 /* Adjust stretch for any alignment frag. Note that if have
20992 been expanding the earlier code, the symbol may be
20993 defined in what appears to be an earlier frag. FIXME:
20994 This doesn't handle the fr_subtype field, which specifies
20995 a maximum number of bytes to skip when doing an
20997 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
20999 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
21002 stretch
= - ((- stretch
)
21003 & ~ ((1 << (int) f
->fr_offset
) - 1));
21005 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
21017 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21020 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
21025 /* Assume worst case for symbols not known to be in the same section. */
21026 if (fragp
->fr_symbol
== NULL
21027 || !S_IS_DEFINED (fragp
->fr_symbol
)
21028 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21029 || S_IS_WEAK (fragp
->fr_symbol
))
21032 val
= relaxed_symbol_addr (fragp
, stretch
);
21033 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
21034 addr
= (addr
+ 4) & ~3;
21035 /* Force misaligned targets to 32-bit variant. */
21039 if (val
< 0 || val
> 1020)
21044 /* Return the size of a relaxable add/sub immediate instruction. */
21046 relax_addsub (fragS
*fragp
, asection
*sec
)
21051 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21052 op
= bfd_get_16(sec
->owner
, buf
);
21053 if ((op
& 0xf) == ((op
>> 4) & 0xf))
21054 return relax_immediate (fragp
, 8, 0);
21056 return relax_immediate (fragp
, 3, 0);
21059 /* Return TRUE iff the definition of symbol S could be pre-empted
21060 (overridden) at link or load time. */
21062 symbol_preemptible (symbolS
*s
)
21064 /* Weak symbols can always be pre-empted. */
21068 /* Non-global symbols cannot be pre-empted. */
21069 if (! S_IS_EXTERNAL (s
))
21073 /* In ELF, a global symbol can be marked protected, or private. In that
21074 case it can't be pre-empted (other definitions in the same link unit
21075 would violate the ODR). */
21076 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
21080 /* Other global symbols might be pre-empted. */
21084 /* Return the size of a relaxable branch instruction. BITS is the
21085 size of the offset field in the narrow instruction. */
21088 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
21094 /* Assume worst case for symbols not known to be in the same section. */
21095 if (!S_IS_DEFINED (fragp
->fr_symbol
)
21096 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21097 || S_IS_WEAK (fragp
->fr_symbol
))
21101 /* A branch to a function in ARM state will require interworking. */
21102 if (S_IS_DEFINED (fragp
->fr_symbol
)
21103 && ARM_IS_FUNC (fragp
->fr_symbol
))
21107 if (symbol_preemptible (fragp
->fr_symbol
))
21110 val
= relaxed_symbol_addr (fragp
, stretch
);
21111 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
21114 /* Offset is a signed value *2 */
21116 if (val
>= limit
|| val
< -limit
)
21122 /* Relax a machine dependent frag. This returns the amount by which
21123 the current size of the frag should change. */
21126 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
21131 oldsize
= fragp
->fr_var
;
21132 switch (fragp
->fr_subtype
)
21134 case T_MNEM_ldr_pc2
:
21135 newsize
= relax_adr (fragp
, sec
, stretch
);
21137 case T_MNEM_ldr_pc
:
21138 case T_MNEM_ldr_sp
:
21139 case T_MNEM_str_sp
:
21140 newsize
= relax_immediate (fragp
, 8, 2);
21144 newsize
= relax_immediate (fragp
, 5, 2);
21148 newsize
= relax_immediate (fragp
, 5, 1);
21152 newsize
= relax_immediate (fragp
, 5, 0);
21155 newsize
= relax_adr (fragp
, sec
, stretch
);
21161 newsize
= relax_immediate (fragp
, 8, 0);
21164 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
21167 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
21169 case T_MNEM_add_sp
:
21170 case T_MNEM_add_pc
:
21171 newsize
= relax_immediate (fragp
, 8, 2);
21173 case T_MNEM_inc_sp
:
21174 case T_MNEM_dec_sp
:
21175 newsize
= relax_immediate (fragp
, 7, 2);
21181 newsize
= relax_addsub (fragp
, sec
);
21187 fragp
->fr_var
= newsize
;
21188 /* Freeze wide instructions that are at or before the same location as
21189 in the previous pass. This avoids infinite loops.
21190 Don't freeze them unconditionally because targets may be artificially
21191 misaligned by the expansion of preceding frags. */
21192 if (stretch
<= 0 && newsize
> 2)
21194 md_convert_frag (sec
->owner
, sec
, fragp
);
21198 return newsize
- oldsize
;
21201 /* Round up a section size to the appropriate boundary. */
21204 md_section_align (segT segment ATTRIBUTE_UNUSED
,
21207 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21208 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
21210 /* For a.out, force the section size to be aligned. If we don't do
21211 this, BFD will align it for us, but it will not write out the
21212 final bytes of the section. This may be a bug in BFD, but it is
21213 easier to fix it here since that is how the other a.out targets
21217 align
= bfd_get_section_alignment (stdoutput
, segment
);
21218 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
21225 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21226 of an rs_align_code fragment. */
21229 arm_handle_align (fragS
* fragP
)
21231 static char const arm_noop
[2][2][4] =
21234 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
21235 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
21238 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
21239 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
21242 static char const thumb_noop
[2][2][2] =
21245 {0xc0, 0x46}, /* LE */
21246 {0x46, 0xc0}, /* BE */
21249 {0x00, 0xbf}, /* LE */
21250 {0xbf, 0x00} /* BE */
21253 static char const wide_thumb_noop
[2][4] =
21254 { /* Wide Thumb-2 */
21255 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
21256 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
21259 unsigned bytes
, fix
, noop_size
;
21262 const char *narrow_noop
= NULL
;
21267 if (fragP
->fr_type
!= rs_align_code
)
21270 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
21271 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
21274 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21275 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
21277 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
21279 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
21281 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21282 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
21284 narrow_noop
= thumb_noop
[1][target_big_endian
];
21285 noop
= wide_thumb_noop
[target_big_endian
];
21288 noop
= thumb_noop
[0][target_big_endian
];
21296 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21297 ? selected_cpu
: arm_arch_none
,
21299 [target_big_endian
];
21306 fragP
->fr_var
= noop_size
;
21308 if (bytes
& (noop_size
- 1))
21310 fix
= bytes
& (noop_size
- 1);
21312 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
21314 memset (p
, 0, fix
);
21321 if (bytes
& noop_size
)
21323 /* Insert a narrow noop. */
21324 memcpy (p
, narrow_noop
, noop_size
);
21326 bytes
-= noop_size
;
21330 /* Use wide noops for the remainder */
21334 while (bytes
>= noop_size
)
21336 memcpy (p
, noop
, noop_size
);
21338 bytes
-= noop_size
;
21342 fragP
->fr_fix
+= fix
;
21345 /* Called from md_do_align. Used to create an alignment
21346 frag in a code section. */
21349 arm_frag_align_code (int n
, int max
)
21353 /* We assume that there will never be a requirement
21354 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21355 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21360 _("alignments greater than %d bytes not supported in .text sections."),
21361 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
21362 as_fatal ("%s", err_msg
);
21365 p
= frag_var (rs_align_code
,
21366 MAX_MEM_FOR_RS_ALIGN_CODE
,
21368 (relax_substateT
) max
,
21375 /* Perform target specific initialisation of a frag.
21376 Note - despite the name this initialisation is not done when the frag
21377 is created, but only when its type is assigned. A frag can be created
21378 and used a long time before its type is set, so beware of assuming that
21379 this initialisationis performed first. */
21383 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
21385 /* Record whether this frag is in an ARM or a THUMB area. */
21386 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21389 #else /* OBJ_ELF is defined. */
21391 arm_init_frag (fragS
* fragP
, int max_chars
)
21393 int frag_thumb_mode
;
21395 /* If the current ARM vs THUMB mode has not already
21396 been recorded into this frag then do so now. */
21397 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
21398 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21400 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
21402 /* Record a mapping symbol for alignment frags. We will delete this
21403 later if the alignment ends up empty. */
21404 switch (fragP
->fr_type
)
21407 case rs_align_test
:
21409 mapping_state_2 (MAP_DATA
, max_chars
);
21411 case rs_align_code
:
21412 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
21419 /* When we change sections we need to issue a new mapping symbol. */
21422 arm_elf_change_section (void)
21424 /* Link an unlinked unwind index table section to the .text section. */
21425 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
21426 && elf_linked_to_section (now_seg
) == NULL
)
21427 elf_linked_to_section (now_seg
) = text_section
;
21431 arm_elf_section_type (const char * str
, size_t len
)
21433 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
21434 return SHT_ARM_EXIDX
;
21439 /* Code to deal with unwinding tables. */
21441 static void add_unwind_adjustsp (offsetT
);
21443 /* Generate any deferred unwind frame offset. */
21446 flush_pending_unwind (void)
21450 offset
= unwind
.pending_offset
;
21451 unwind
.pending_offset
= 0;
21453 add_unwind_adjustsp (offset
);
21456 /* Add an opcode to this list for this function. Two-byte opcodes should
21457 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21461 add_unwind_opcode (valueT op
, int length
)
21463 /* Add any deferred stack adjustment. */
21464 if (unwind
.pending_offset
)
21465 flush_pending_unwind ();
21467 unwind
.sp_restored
= 0;
21469 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
21471 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
21472 if (unwind
.opcodes
)
21473 unwind
.opcodes
= (unsigned char *) xrealloc (unwind
.opcodes
,
21474 unwind
.opcode_alloc
);
21476 unwind
.opcodes
= (unsigned char *) xmalloc (unwind
.opcode_alloc
);
21481 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
21483 unwind
.opcode_count
++;
21487 /* Add unwind opcodes to adjust the stack pointer. */
21490 add_unwind_adjustsp (offsetT offset
)
21494 if (offset
> 0x200)
21496 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
21501 /* Long form: 0xb2, uleb128. */
21502 /* This might not fit in a word so add the individual bytes,
21503 remembering the list is built in reverse order. */
21504 o
= (valueT
) ((offset
- 0x204) >> 2);
21506 add_unwind_opcode (0, 1);
21508 /* Calculate the uleb128 encoding of the offset. */
21512 bytes
[n
] = o
& 0x7f;
21518 /* Add the insn. */
21520 add_unwind_opcode (bytes
[n
- 1], 1);
21521 add_unwind_opcode (0xb2, 1);
21523 else if (offset
> 0x100)
21525 /* Two short opcodes. */
21526 add_unwind_opcode (0x3f, 1);
21527 op
= (offset
- 0x104) >> 2;
21528 add_unwind_opcode (op
, 1);
21530 else if (offset
> 0)
21532 /* Short opcode. */
21533 op
= (offset
- 4) >> 2;
21534 add_unwind_opcode (op
, 1);
21536 else if (offset
< 0)
21539 while (offset
> 0x100)
21541 add_unwind_opcode (0x7f, 1);
21544 op
= ((offset
- 4) >> 2) | 0x40;
21545 add_unwind_opcode (op
, 1);
21549 /* Finish the list of unwind opcodes for this function. */
21551 finish_unwind_opcodes (void)
21555 if (unwind
.fp_used
)
21557 /* Adjust sp as necessary. */
21558 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
21559 flush_pending_unwind ();
21561 /* After restoring sp from the frame pointer. */
21562 op
= 0x90 | unwind
.fp_reg
;
21563 add_unwind_opcode (op
, 1);
21566 flush_pending_unwind ();
21570 /* Start an exception table entry. If idx is nonzero this is an index table
21574 start_unwind_section (const segT text_seg
, int idx
)
21576 const char * text_name
;
21577 const char * prefix
;
21578 const char * prefix_once
;
21579 const char * group_name
;
21583 size_t sec_name_len
;
21590 prefix
= ELF_STRING_ARM_unwind
;
21591 prefix_once
= ELF_STRING_ARM_unwind_once
;
21592 type
= SHT_ARM_EXIDX
;
21596 prefix
= ELF_STRING_ARM_unwind_info
;
21597 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
21598 type
= SHT_PROGBITS
;
21601 text_name
= segment_name (text_seg
);
21602 if (streq (text_name
, ".text"))
21605 if (strncmp (text_name
, ".gnu.linkonce.t.",
21606 strlen (".gnu.linkonce.t.")) == 0)
21608 prefix
= prefix_once
;
21609 text_name
+= strlen (".gnu.linkonce.t.");
21612 prefix_len
= strlen (prefix
);
21613 text_len
= strlen (text_name
);
21614 sec_name_len
= prefix_len
+ text_len
;
21615 sec_name
= (char *) xmalloc (sec_name_len
+ 1);
21616 memcpy (sec_name
, prefix
, prefix_len
);
21617 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
21618 sec_name
[prefix_len
+ text_len
] = '\0';
21624 /* Handle COMDAT group. */
21625 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
21627 group_name
= elf_group_name (text_seg
);
21628 if (group_name
== NULL
)
21630 as_bad (_("Group section `%s' has no group signature"),
21631 segment_name (text_seg
));
21632 ignore_rest_of_line ();
21635 flags
|= SHF_GROUP
;
21639 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
21641 /* Set the section link for index tables. */
21643 elf_linked_to_section (now_seg
) = text_seg
;
21647 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21648 personality routine data. Returns zero, or the index table value for
21649 an inline entry. */
21652 create_unwind_entry (int have_data
)
21657 /* The current word of data. */
21659 /* The number of bytes left in this word. */
21662 finish_unwind_opcodes ();
21664 /* Remember the current text section. */
21665 unwind
.saved_seg
= now_seg
;
21666 unwind
.saved_subseg
= now_subseg
;
21668 start_unwind_section (now_seg
, 0);
21670 if (unwind
.personality_routine
== NULL
)
21672 if (unwind
.personality_index
== -2)
21675 as_bad (_("handlerdata in cantunwind frame"));
21676 return 1; /* EXIDX_CANTUNWIND. */
21679 /* Use a default personality routine if none is specified. */
21680 if (unwind
.personality_index
== -1)
21682 if (unwind
.opcode_count
> 3)
21683 unwind
.personality_index
= 1;
21685 unwind
.personality_index
= 0;
21688 /* Space for the personality routine entry. */
21689 if (unwind
.personality_index
== 0)
21691 if (unwind
.opcode_count
> 3)
21692 as_bad (_("too many unwind opcodes for personality routine 0"));
21696 /* All the data is inline in the index table. */
21699 while (unwind
.opcode_count
> 0)
21701 unwind
.opcode_count
--;
21702 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21706 /* Pad with "finish" opcodes. */
21708 data
= (data
<< 8) | 0xb0;
21715 /* We get two opcodes "free" in the first word. */
21716 size
= unwind
.opcode_count
- 2;
21720 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
21721 if (unwind
.personality_index
!= -1)
21723 as_bad (_("attempt to recreate an unwind entry"));
21727 /* An extra byte is required for the opcode count. */
21728 size
= unwind
.opcode_count
+ 1;
21731 size
= (size
+ 3) >> 2;
21733 as_bad (_("too many unwind opcodes"));
21735 frag_align (2, 0, 0);
21736 record_alignment (now_seg
, 2);
21737 unwind
.table_entry
= expr_build_dot ();
21739 /* Allocate the table entry. */
21740 ptr
= frag_more ((size
<< 2) + 4);
21741 /* PR 13449: Zero the table entries in case some of them are not used. */
21742 memset (ptr
, 0, (size
<< 2) + 4);
21743 where
= frag_now_fix () - ((size
<< 2) + 4);
21745 switch (unwind
.personality_index
)
21748 /* ??? Should this be a PLT generating relocation? */
21749 /* Custom personality routine. */
21750 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
21751 BFD_RELOC_ARM_PREL31
);
21756 /* Set the first byte to the number of additional words. */
21757 data
= size
> 0 ? size
- 1 : 0;
21761 /* ABI defined personality routines. */
21763 /* Three opcodes bytes are packed into the first word. */
21770 /* The size and first two opcode bytes go in the first word. */
21771 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
21776 /* Should never happen. */
21780 /* Pack the opcodes into words (MSB first), reversing the list at the same
21782 while (unwind
.opcode_count
> 0)
21786 md_number_to_chars (ptr
, data
, 4);
21791 unwind
.opcode_count
--;
21793 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21796 /* Finish off the last word. */
21799 /* Pad with "finish" opcodes. */
21801 data
= (data
<< 8) | 0xb0;
21803 md_number_to_chars (ptr
, data
, 4);
21808 /* Add an empty descriptor if there is no user-specified data. */
21809 ptr
= frag_more (4);
21810 md_number_to_chars (ptr
, 0, 4);
21817 /* Initialize the DWARF-2 unwind information for this procedure. */
21820 tc_arm_frame_initial_instructions (void)
21822 cfi_add_CFA_def_cfa (REG_SP
, 0);
21824 #endif /* OBJ_ELF */
21826 /* Convert REGNAME to a DWARF-2 register number. */
21829 tc_arm_regname_to_dw2regnum (char *regname
)
21831 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
21835 /* PR 16694: Allow VFP registers as well. */
21836 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
21840 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
#ifdef TE_PE
/* Emit a SIZE-byte section-relative (secrel) offset to SYMBOL, as used
   by DWARF-2 output on PE targets.  */
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#endif
21860 /* MD interface: Symbol and relocation handling. */
21862 /* Return the address within the segment that a PC-relative fixup is
21863 relative to. For ARM, PC-relative fixups applied to instructions
21864 are generally relative to the location of the fixup plus 8 bytes.
21865 Thumb branches are offset by 4, and Thumb loads relative to PC
21866 require special handling. */
21869 md_pcrel_from_section (fixS
* fixP
, segT seg
)
21871 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21873 /* If this is pc-relative and we are going to emit a relocation
21874 then we just want to put out any pipeline compensation that the linker
21875 will need. Otherwise we want to use the calculated base.
21876 For WinCE we skip the bias for externals as well, since this
21877 is how the MS ARM-CE assembler behaves and we want to be compatible. */
21879 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
21880 || (arm_force_relocation (fixP
)
21882 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
21888 switch (fixP
->fx_r_type
)
21890 /* PC relative addressing on the Thumb is slightly odd as the
21891 bottom two bits of the PC are forced to zero for the
21892 calculation. This happens *after* application of the
21893 pipeline offset. However, Thumb adrl already adjusts for
21894 this, so we need not do it again. */
21895 case BFD_RELOC_ARM_THUMB_ADD
:
21898 case BFD_RELOC_ARM_THUMB_OFFSET
:
21899 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
21900 case BFD_RELOC_ARM_T32_ADD_PC12
:
21901 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
21902 return (base
+ 4) & ~3;
21904 /* Thumb branches are simply offset by +4. */
21905 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
21906 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
21907 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
21908 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
21909 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
21912 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
21914 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21915 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21916 && ARM_IS_FUNC (fixP
->fx_addsy
)
21917 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21918 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21921 /* BLX is like branches above, but forces the low two bits of PC to
21923 case BFD_RELOC_THUMB_PCREL_BLX
:
21925 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21926 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21927 && THUMB_IS_FUNC (fixP
->fx_addsy
)
21928 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21929 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21930 return (base
+ 4) & ~3;
21932 /* ARM mode branches are offset by +8. However, the Windows CE
21933 loader expects the relocation not to take this into account. */
21934 case BFD_RELOC_ARM_PCREL_BLX
:
21936 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21937 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21938 && ARM_IS_FUNC (fixP
->fx_addsy
)
21939 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21940 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21943 case BFD_RELOC_ARM_PCREL_CALL
:
21945 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21946 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21947 && THUMB_IS_FUNC (fixP
->fx_addsy
)
21948 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21949 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21952 case BFD_RELOC_ARM_PCREL_BRANCH
:
21953 case BFD_RELOC_ARM_PCREL_JUMP
:
21954 case BFD_RELOC_ARM_PLT32
:
21956 /* When handling fixups immediately, because we have already
21957 discovered the value of a symbol, or the address of the frag involved
21958 we must account for the offset by +8, as the OS loader will never see the reloc.
21959 see fixup_segment() in write.c
21960 The S_IS_EXTERNAL test handles the case of global symbols.
21961 Those need the calculated base, not just the pipe compensation the linker will need. */
21963 && fixP
->fx_addsy
!= NULL
21964 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21965 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
21973 /* ARM mode loads relative to PC are also offset by +8. Unlike
21974 branches, the Windows CE loader *does* expect the relocation
21975 to take this into account. */
21976 case BFD_RELOC_ARM_OFFSET_IMM
:
21977 case BFD_RELOC_ARM_OFFSET_IMM8
:
21978 case BFD_RELOC_ARM_HWLITERAL
:
21979 case BFD_RELOC_ARM_LITERAL
:
21980 case BFD_RELOC_ARM_CP_OFF_IMM
:
21984 /* Other PC-relative relocations are un-offset. */
21990 static bfd_boolean flag_warn_syms
= TRUE
;
21993 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
21995 /* PR 18347 - Warn if the user attempts to create a symbol with the same
21996 name as an ARM instruction. Whilst strictly speaking it is allowed, it
21997 does mean that the resulting code might be very confusing to the reader.
21998 Also this warning can be triggered if the user omits an operand before
21999 an immediate address, eg:
22003 GAS treats this as an assignment of the value of the symbol foo to a
22004 symbol LDR, and so (without this code) it will not issue any kind of
22005 warning or error message.
22007 Note - ARM instructions are case-insensitive but the strings in the hash
22008 table are all stored in lower case, so we must first ensure that name is
22010 if (flag_warn_syms
&& arm_ops_hsh
)
22012 char * nbuf
= strdup (name
);
22015 for (p
= nbuf
; *p
; p
++)
22017 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
22019 static struct hash_control
* already_warned
= NULL
;
22021 if (already_warned
== NULL
)
22022 already_warned
= hash_new ();
22023 /* Only warn about the symbol once. To keep the code
22024 simple we let hash_insert do the lookup for us. */
22025 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
22026 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
22035 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22036 Otherwise we have no need to default values of symbols. */
22039 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
22042 if (name
[0] == '_' && name
[1] == 'G'
22043 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
22047 if (symbol_find (name
))
22048 as_bad (_("GOT already in the symbol table"));
22050 GOT_symbol
= symbol_new (name
, undefined_section
,
22051 (valueT
) 0, & zero_address_frag
);
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.  Returns the low-part encoding; *HIGHPART receives
   the high-part encoding.  */

static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	return (a & 0xff) | (i << 7);
      }

  abort ();
}
22101 validate_offset_imm (unsigned int val
, int hwse
)
22103 if ((hwse
&& val
> 255) || val
> 4095)
22108 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22109 negative immediate constant by altering the instruction. A bit of
22114 by inverting the second operand, and
22117 by negating the second operand. */
22120 negate_data_op (unsigned long * instruction
,
22121 unsigned long value
)
22124 unsigned long negated
, inverted
;
22126 negated
= encode_arm_immediate (-value
);
22127 inverted
= encode_arm_immediate (~value
);
22129 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
22132 /* First negates. */
22133 case OPCODE_SUB
: /* ADD <-> SUB */
22134 new_inst
= OPCODE_ADD
;
22139 new_inst
= OPCODE_SUB
;
22143 case OPCODE_CMP
: /* CMP <-> CMN */
22144 new_inst
= OPCODE_CMN
;
22149 new_inst
= OPCODE_CMP
;
22153 /* Now Inverted ops. */
22154 case OPCODE_MOV
: /* MOV <-> MVN */
22155 new_inst
= OPCODE_MVN
;
22160 new_inst
= OPCODE_MOV
;
22164 case OPCODE_AND
: /* AND <-> BIC */
22165 new_inst
= OPCODE_BIC
;
22170 new_inst
= OPCODE_AND
;
22174 case OPCODE_ADC
: /* ADC <-> SBC */
22175 new_inst
= OPCODE_SBC
;
22180 new_inst
= OPCODE_ADC
;
22184 /* We cannot do anything. */
22189 if (value
== (unsigned) FAIL
)
22192 *instruction
&= OPCODE_MASK
;
22193 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
22197 /* Like negate_data_op, but for Thumb-2. */
22199 static unsigned int
22200 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
22204 unsigned int negated
, inverted
;
22206 negated
= encode_thumb32_immediate (-value
);
22207 inverted
= encode_thumb32_immediate (~value
);
22209 rd
= (*instruction
>> 8) & 0xf;
22210 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
22213 /* ADD <-> SUB. Includes CMP <-> CMN. */
22214 case T2_OPCODE_SUB
:
22215 new_inst
= T2_OPCODE_ADD
;
22219 case T2_OPCODE_ADD
:
22220 new_inst
= T2_OPCODE_SUB
;
22224 /* ORR <-> ORN. Includes MOV <-> MVN. */
22225 case T2_OPCODE_ORR
:
22226 new_inst
= T2_OPCODE_ORN
;
22230 case T2_OPCODE_ORN
:
22231 new_inst
= T2_OPCODE_ORR
;
22235 /* AND <-> BIC. TST has no inverted equivalent. */
22236 case T2_OPCODE_AND
:
22237 new_inst
= T2_OPCODE_BIC
;
22244 case T2_OPCODE_BIC
:
22245 new_inst
= T2_OPCODE_AND
;
22250 case T2_OPCODE_ADC
:
22251 new_inst
= T2_OPCODE_SBC
;
22255 case T2_OPCODE_SBC
:
22256 new_inst
= T2_OPCODE_ADC
;
22260 /* We cannot do anything. */
22265 if (value
== (unsigned int)FAIL
)
22268 *instruction
&= T2_OPCODE_MASK
;
22269 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
22273 /* Read a 32-bit thumb instruction from buf. */
22274 static unsigned long
22275 get_thumb32_insn (char * buf
)
22277 unsigned long insn
;
22278 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
22279 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22285 /* We usually want to set the low bit on the address of thumb function
22286 symbols. In particular .word foo - . should have the low bit set.
22287 Generic code tries to fold the difference of two symbols to
22288 a constant. Prevent this and force a relocation when the first symbols
22289 is a thumb function. */
22292 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
22294 if (op
== O_subtract
22295 && l
->X_op
== O_symbol
22296 && r
->X_op
== O_symbol
22297 && THUMB_IS_FUNC (l
->X_add_symbol
))
22299 l
->X_op
= O_subtract
;
22300 l
->X_op_symbol
= r
->X_add_symbol
;
22301 l
->X_add_number
-= r
->X_add_number
;
22305 /* Process as normal. */
22309 /* Encode Thumb2 unconditional branches and calls. The encoding
22310 for the 2 are identical for the immediate values. */
22313 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
22315 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22318 addressT S
, I1
, I2
, lo
, hi
;
22320 S
= (value
>> 24) & 0x01;
22321 I1
= (value
>> 23) & 0x01;
22322 I2
= (value
>> 22) & 0x01;
22323 hi
= (value
>> 12) & 0x3ff;
22324 lo
= (value
>> 1) & 0x7ff;
22325 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22326 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22327 newval
|= (S
<< 10) | hi
;
22328 newval2
&= ~T2I1I2MASK
;
22329 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
22330 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22331 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22335 md_apply_fix (fixS
* fixP
,
22339 offsetT value
= * valP
;
22341 unsigned int newimm
;
22342 unsigned long temp
;
22344 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
22346 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
22348 /* Note whether this will delete the relocation. */
22350 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
22353 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22354 consistency with the behaviour on 32-bit hosts. Remember value
22356 value
&= 0xffffffff;
22357 value
^= 0x80000000;
22358 value
-= 0x80000000;
22361 fixP
->fx_addnumber
= value
;
22363 /* Same treatment for fixP->fx_offset. */
22364 fixP
->fx_offset
&= 0xffffffff;
22365 fixP
->fx_offset
^= 0x80000000;
22366 fixP
->fx_offset
-= 0x80000000;
22368 switch (fixP
->fx_r_type
)
22370 case BFD_RELOC_NONE
:
22371 /* This will need to go in the object file. */
22375 case BFD_RELOC_ARM_IMMEDIATE
:
22376 /* We claim that this fixup has been processed here,
22377 even if in fact we generate an error because we do
22378 not have a reloc for it, so tc_gen_reloc will reject it. */
22381 if (fixP
->fx_addsy
)
22383 const char *msg
= 0;
22385 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22386 msg
= _("undefined symbol %s used as an immediate value");
22387 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22388 msg
= _("symbol %s is in a different section");
22389 else if (S_IS_WEAK (fixP
->fx_addsy
))
22390 msg
= _("symbol %s is weak and may be overridden later");
22394 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22395 msg
, S_GET_NAME (fixP
->fx_addsy
));
22400 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22402 /* If the offset is negative, we should use encoding A2 for ADR. */
22403 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
22404 newimm
= negate_data_op (&temp
, value
);
22407 newimm
= encode_arm_immediate (value
);
22409 /* If the instruction will fail, see if we can fix things up by
22410 changing the opcode. */
22411 if (newimm
== (unsigned int) FAIL
)
22412 newimm
= negate_data_op (&temp
, value
);
22415 if (newimm
== (unsigned int) FAIL
)
22417 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22418 _("invalid constant (%lx) after fixup"),
22419 (unsigned long) value
);
22423 newimm
|= (temp
& 0xfffff000);
22424 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22427 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
22429 unsigned int highpart
= 0;
22430 unsigned int newinsn
= 0xe1a00000; /* nop. */
22432 if (fixP
->fx_addsy
)
22434 const char *msg
= 0;
22436 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22437 msg
= _("undefined symbol %s used as an immediate value");
22438 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22439 msg
= _("symbol %s is in a different section");
22440 else if (S_IS_WEAK (fixP
->fx_addsy
))
22441 msg
= _("symbol %s is weak and may be overridden later");
22445 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22446 msg
, S_GET_NAME (fixP
->fx_addsy
));
22451 newimm
= encode_arm_immediate (value
);
22452 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22454 /* If the instruction will fail, see if we can fix things up by
22455 changing the opcode. */
22456 if (newimm
== (unsigned int) FAIL
22457 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
22459 /* No ? OK - try using two ADD instructions to generate
22461 newimm
= validate_immediate_twopart (value
, & highpart
);
22463 /* Yes - then make sure that the second instruction is
22465 if (newimm
!= (unsigned int) FAIL
)
22467 /* Still No ? Try using a negated value. */
22468 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
22469 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
22470 /* Otherwise - give up. */
22473 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22474 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22479 /* Replace the first operand in the 2nd instruction (which
22480 is the PC) with the destination register. We have
22481 already added in the PC in the first instruction and we
22482 do not want to do it again. */
22483 newinsn
&= ~ 0xf0000;
22484 newinsn
|= ((newinsn
& 0x0f000) << 4);
22487 newimm
|= (temp
& 0xfffff000);
22488 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22490 highpart
|= (newinsn
& 0xfffff000);
22491 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
22495 case BFD_RELOC_ARM_OFFSET_IMM
:
22496 if (!fixP
->fx_done
&& seg
->use_rela_p
)
22499 case BFD_RELOC_ARM_LITERAL
:
22505 if (validate_offset_imm (value
, 0) == FAIL
)
22507 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
22508 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22509 _("invalid literal constant: pool needs to be closer"));
22511 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22512 _("bad immediate value for offset (%ld)"),
22517 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22519 newval
&= 0xfffff000;
22522 newval
&= 0xff7ff000;
22523 newval
|= value
| (sign
? INDEX_UP
: 0);
22525 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22528 case BFD_RELOC_ARM_OFFSET_IMM8
:
22529 case BFD_RELOC_ARM_HWLITERAL
:
22535 if (validate_offset_imm (value
, 1) == FAIL
)
22537 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
22538 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22539 _("invalid literal constant: pool needs to be closer"));
22541 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22542 _("bad immediate value for 8-bit offset (%ld)"),
22547 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22549 newval
&= 0xfffff0f0;
22552 newval
&= 0xff7ff0f0;
22553 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
22555 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22558 case BFD_RELOC_ARM_T32_OFFSET_U8
:
22559 if (value
< 0 || value
> 1020 || value
% 4 != 0)
22560 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22561 _("bad immediate value for offset (%ld)"), (long) value
);
22564 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
22566 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
22569 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22570 /* This is a complicated relocation used for all varieties of Thumb32
22571 load/store instruction with immediate offset:
22573 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22574 *4, optional writeback(W)
22575 (doubleword load/store)
22577 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22578 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22579 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22580 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22581 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22583 Uppercase letters indicate bits that are already encoded at
22584 this point. Lowercase letters are our problem. For the
22585 second block of instructions, the secondary opcode nybble
22586 (bits 8..11) is present, and bit 23 is zero, even if this is
22587 a PC-relative operation. */
22588 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22590 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
22592 if ((newval
& 0xf0000000) == 0xe0000000)
22594 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22596 newval
|= (1 << 23);
22599 if (value
% 4 != 0)
22601 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22602 _("offset not a multiple of 4"));
22608 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22609 _("offset out of range"));
22614 else if ((newval
& 0x000f0000) == 0x000f0000)
22616 /* PC-relative, 12-bit offset. */
22618 newval
|= (1 << 23);
22623 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22624 _("offset out of range"));
22629 else if ((newval
& 0x00000100) == 0x00000100)
22631 /* Writeback: 8-bit, +/- offset. */
22633 newval
|= (1 << 9);
22638 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22639 _("offset out of range"));
22644 else if ((newval
& 0x00000f00) == 0x00000e00)
22646 /* T-instruction: positive 8-bit offset. */
22647 if (value
< 0 || value
> 0xff)
22649 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22650 _("offset out of range"));
22658 /* Positive 12-bit or negative 8-bit offset. */
22662 newval
|= (1 << 23);
22672 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22673 _("offset out of range"));
22680 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
22681 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
22684 case BFD_RELOC_ARM_SHIFT_IMM
:
22685 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22686 if (((unsigned long) value
) > 32
22688 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
22690 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22691 _("shift expression is too large"));
22696 /* Shifts of zero must be done as lsl. */
22698 else if (value
== 32)
22700 newval
&= 0xfffff07f;
22701 newval
|= (value
& 0x1f) << 7;
22702 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22705 case BFD_RELOC_ARM_T32_IMMEDIATE
:
22706 case BFD_RELOC_ARM_T32_ADD_IMM
:
22707 case BFD_RELOC_ARM_T32_IMM12
:
22708 case BFD_RELOC_ARM_T32_ADD_PC12
:
22709 /* We claim that this fixup has been processed here,
22710 even if in fact we generate an error because we do
22711 not have a reloc for it, so tc_gen_reloc will reject it. */
22715 && ! S_IS_DEFINED (fixP
->fx_addsy
))
22717 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22718 _("undefined symbol %s used as an immediate value"),
22719 S_GET_NAME (fixP
->fx_addsy
));
22723 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22725 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
22728 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
22729 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22731 newimm
= encode_thumb32_immediate (value
);
22732 if (newimm
== (unsigned int) FAIL
)
22733 newimm
= thumb32_negate_data_op (&newval
, value
);
22735 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
22736 && newimm
== (unsigned int) FAIL
)
22738 /* Turn add/sum into addw/subw. */
22739 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22740 newval
= (newval
& 0xfeffffff) | 0x02000000;
22741 /* No flat 12-bit imm encoding for addsw/subsw. */
22742 if ((newval
& 0x00100000) == 0)
22744 /* 12 bit immediate for addw/subw. */
22748 newval
^= 0x00a00000;
22751 newimm
= (unsigned int) FAIL
;
22757 if (newimm
== (unsigned int)FAIL
)
22759 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22760 _("invalid constant (%lx) after fixup"),
22761 (unsigned long) value
);
22765 newval
|= (newimm
& 0x800) << 15;
22766 newval
|= (newimm
& 0x700) << 4;
22767 newval
|= (newimm
& 0x0ff);
22769 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
22770 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
22773 case BFD_RELOC_ARM_SMC
:
22774 if (((unsigned long) value
) > 0xffff)
22775 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22776 _("invalid smc expression"));
22777 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22778 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22779 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22782 case BFD_RELOC_ARM_HVC
:
22783 if (((unsigned long) value
) > 0xffff)
22784 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22785 _("invalid hvc expression"));
22786 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22787 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22788 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22791 case BFD_RELOC_ARM_SWI
:
22792 if (fixP
->tc_fix_data
!= 0)
22794 if (((unsigned long) value
) > 0xff)
22795 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22796 _("invalid swi expression"));
22797 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22799 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22803 if (((unsigned long) value
) > 0x00ffffff)
22804 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22805 _("invalid swi expression"));
22806 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22808 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22812 case BFD_RELOC_ARM_MULTI
:
22813 if (((unsigned long) value
) > 0xffff)
22814 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22815 _("invalid expression in load/store multiple"));
22816 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
22817 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22821 case BFD_RELOC_ARM_PCREL_CALL
:
22823 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22825 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22826 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22827 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22828 /* Flip the bl to blx. This is a simple flip
22829 bit here because we generate PCREL_CALL for
22830 unconditional bls. */
22832 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22833 newval
= newval
| 0x10000000;
22834 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22840 goto arm_branch_common
;
22842 case BFD_RELOC_ARM_PCREL_JUMP
:
22843 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22845 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22846 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22847 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22849 /* This would map to a bl<cond>, b<cond>,
22850 b<always> to a Thumb function. We
22851 need to force a relocation for this particular
22853 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22857 case BFD_RELOC_ARM_PLT32
:
22859 case BFD_RELOC_ARM_PCREL_BRANCH
:
22861 goto arm_branch_common
;
22863 case BFD_RELOC_ARM_PCREL_BLX
:
22866 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22868 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22869 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22870 && ARM_IS_FUNC (fixP
->fx_addsy
))
22872 /* Flip the blx to a bl and warn. */
22873 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
22874 newval
= 0xeb000000;
22875 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
22876 _("blx to '%s' an ARM ISA state function changed to bl"),
22878 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22884 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
22885 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
22889 /* We are going to store value (shifted right by two) in the
22890 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22891 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
22892 also be be clear. */
22894 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22895 _("misaligned branch destination"));
22896 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
22897 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
22898 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22900 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22902 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22903 newval
|= (value
>> 2) & 0x00ffffff;
22904 /* Set the H bit on BLX instructions. */
22908 newval
|= 0x01000000;
22910 newval
&= ~0x01000000;
22912 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22916 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
22917 /* CBZ can only branch forward. */
22919 /* Attempts to use CBZ to branch to the next instruction
22920 (which, strictly speaking, are prohibited) will be turned into
22923 FIXME: It may be better to remove the instruction completely and
22924 perform relaxation. */
22927 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22928 newval
= 0xbf00; /* NOP encoding T1 */
22929 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22934 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22936 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22938 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22939 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
22940 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22945 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
22946 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
22947 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22949 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22951 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22952 newval
|= (value
& 0x1ff) >> 1;
22953 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22957 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
22958 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
22959 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22961 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22963 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22964 newval
|= (value
& 0xfff) >> 1;
22965 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22969 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
22971 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22972 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22973 && ARM_IS_FUNC (fixP
->fx_addsy
)
22974 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22976 /* Force a relocation for a branch 20 bits wide. */
22979 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
22980 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22981 _("conditional branch out of range"));
22983 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22986 addressT S
, J1
, J2
, lo
, hi
;
22988 S
= (value
& 0x00100000) >> 20;
22989 J2
= (value
& 0x00080000) >> 19;
22990 J1
= (value
& 0x00040000) >> 18;
22991 hi
= (value
& 0x0003f000) >> 12;
22992 lo
= (value
& 0x00000ffe) >> 1;
22994 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22995 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22996 newval
|= (S
<< 10) | hi
;
22997 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
22998 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22999 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
23003 case BFD_RELOC_THUMB_PCREL_BLX
:
23004 /* If there is a blx from a thumb state function to
23005 another thumb function flip this to a bl and warn
23009 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23010 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23011 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23013 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23014 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23015 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23017 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23018 newval
= newval
| 0x1000;
23019 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23020 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23025 goto thumb_bl_common
;
23027 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23028 /* A bl from Thumb state ISA to an internal ARM state function
23029 is converted to a blx. */
23031 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23032 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23033 && ARM_IS_FUNC (fixP
->fx_addsy
)
23034 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23036 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23037 newval
= newval
& ~0x1000;
23038 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23039 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
23045 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23046 /* For a BLX instruction, make sure that the relocation is rounded up
23047 to a word boundary. This follows the semantics of the instruction
23048 which specifies that bit 1 of the target address will come from bit
23049 1 of the base address. */
23050 value
= (value
+ 3) & ~ 3;
23053 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
23054 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23055 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23058 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
23060 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
23061 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23062 else if ((value
& ~0x1ffffff)
23063 && ((value
& ~0x1ffffff) != ~0x1ffffff))
23064 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23065 _("Thumb2 branch out of range"));
23068 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23069 encode_thumb2_b_bl_offset (buf
, value
);
23073 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23074 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
23075 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23077 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23078 encode_thumb2_b_bl_offset (buf
, value
);
23083 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23088 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23089 md_number_to_chars (buf
, value
, 2);
23093 case BFD_RELOC_ARM_TLS_CALL
:
23094 case BFD_RELOC_ARM_THM_TLS_CALL
:
23095 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23096 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23097 case BFD_RELOC_ARM_TLS_GOTDESC
:
23098 case BFD_RELOC_ARM_TLS_GD32
:
23099 case BFD_RELOC_ARM_TLS_LE32
:
23100 case BFD_RELOC_ARM_TLS_IE32
:
23101 case BFD_RELOC_ARM_TLS_LDM32
:
23102 case BFD_RELOC_ARM_TLS_LDO32
:
23103 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
23106 case BFD_RELOC_ARM_GOT32
:
23107 case BFD_RELOC_ARM_GOTOFF
:
23110 case BFD_RELOC_ARM_GOT_PREL
:
23111 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23112 md_number_to_chars (buf
, value
, 4);
23115 case BFD_RELOC_ARM_TARGET2
:
23116 /* TARGET2 is not partial-inplace, so we need to write the
23117 addend here for REL targets, because it won't be written out
23118 during reloc processing later. */
23119 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23120 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
23124 case BFD_RELOC_RVA
:
23126 case BFD_RELOC_ARM_TARGET1
:
23127 case BFD_RELOC_ARM_ROSEGREL32
:
23128 case BFD_RELOC_ARM_SBREL32
:
23129 case BFD_RELOC_32_PCREL
:
23131 case BFD_RELOC_32_SECREL
:
23133 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23135 /* For WinCE we only do this for pcrel fixups. */
23136 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
23138 md_number_to_chars (buf
, value
, 4);
23142 case BFD_RELOC_ARM_PREL31
:
23143 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23145 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
23146 if ((value
^ (value
>> 1)) & 0x40000000)
23148 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23149 _("rel31 relocation overflow"));
23151 newval
|= value
& 0x7fffffff;
23152 md_number_to_chars (buf
, newval
, 4);
23157 case BFD_RELOC_ARM_CP_OFF_IMM
:
23158 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23159 if (value
< -1023 || value
> 1023 || (value
& 3))
23160 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23161 _("co-processor offset out of range"));
23166 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23167 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23168 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23170 newval
= get_thumb32_insn (buf
);
23172 newval
&= 0xffffff00;
23175 newval
&= 0xff7fff00;
23176 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
23178 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23179 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23180 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23182 put_thumb32_insn (buf
, newval
);
23185 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
23186 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
23187 if (value
< -255 || value
> 255)
23188 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23189 _("co-processor offset out of range"));
23191 goto cp_off_common
;
23193 case BFD_RELOC_ARM_THUMB_OFFSET
:
23194 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23195 /* Exactly what ranges, and where the offset is inserted depends
23196 on the type of instruction, we can establish this from the
23198 switch (newval
>> 12)
23200 case 4: /* PC load. */
23201 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23202 forced to zero for these loads; md_pcrel_from has already
23203 compensated for this. */
23205 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23206 _("invalid offset, target not word aligned (0x%08lX)"),
23207 (((unsigned long) fixP
->fx_frag
->fr_address
23208 + (unsigned long) fixP
->fx_where
) & ~3)
23209 + (unsigned long) value
);
23211 if (value
& ~0x3fc)
23212 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23213 _("invalid offset, value too big (0x%08lX)"),
23216 newval
|= value
>> 2;
23219 case 9: /* SP load/store. */
23220 if (value
& ~0x3fc)
23221 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23222 _("invalid offset, value too big (0x%08lX)"),
23224 newval
|= value
>> 2;
23227 case 6: /* Word load/store. */
23229 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23230 _("invalid offset, value too big (0x%08lX)"),
23232 newval
|= value
<< 4; /* 6 - 2. */
23235 case 7: /* Byte load/store. */
23237 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23238 _("invalid offset, value too big (0x%08lX)"),
23240 newval
|= value
<< 6;
23243 case 8: /* Halfword load/store. */
23245 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23246 _("invalid offset, value too big (0x%08lX)"),
23248 newval
|= value
<< 5; /* 6 - 1. */
23252 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23253 "Unable to process relocation for thumb opcode: %lx",
23254 (unsigned long) newval
);
23257 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23260 case BFD_RELOC_ARM_THUMB_ADD
:
23261 /* This is a complicated relocation, since we use it for all of
23262 the following immediate relocations:
23266 9bit ADD/SUB SP word-aligned
23267 10bit ADD PC/SP word-aligned
23269 The type of instruction being processed is encoded in the
23276 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23278 int rd
= (newval
>> 4) & 0xf;
23279 int rs
= newval
& 0xf;
23280 int subtract
= !!(newval
& 0x8000);
23282 /* Check for HI regs, only very restricted cases allowed:
23283 Adjusting SP, and using PC or SP to get an address. */
23284 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
23285 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
23286 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23287 _("invalid Hi register with immediate"));
23289 /* If value is negative, choose the opposite instruction. */
23293 subtract
= !subtract
;
23295 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23296 _("immediate value out of range"));
23301 if (value
& ~0x1fc)
23302 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23303 _("invalid immediate for stack address calculation"));
23304 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
23305 newval
|= value
>> 2;
23307 else if (rs
== REG_PC
|| rs
== REG_SP
)
23309 /* PR gas/18541. If the addition is for a defined symbol
23310 within range of an ADR instruction then accept it. */
23313 && fixP
->fx_addsy
!= NULL
)
23317 if (! S_IS_DEFINED (fixP
->fx_addsy
)
23318 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
23319 || S_IS_WEAK (fixP
->fx_addsy
))
23321 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23322 _("address calculation needs a strongly defined nearby symbol"));
23326 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23328 /* Round up to the next 4-byte boundary. */
23333 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
23337 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23338 _("symbol too far away"));
23348 if (subtract
|| value
& ~0x3fc)
23349 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23350 _("invalid immediate for address calculation (value = 0x%08lX)"),
23351 (unsigned long) (subtract
? - value
: value
));
23352 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
23354 newval
|= value
>> 2;
23359 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23360 _("immediate value out of range"));
23361 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
23362 newval
|= (rd
<< 8) | value
;
23367 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23368 _("immediate value out of range"));
23369 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
23370 newval
|= rd
| (rs
<< 3) | (value
<< 6);
23373 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23376 case BFD_RELOC_ARM_THUMB_IMM
:
23377 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23378 if (value
< 0 || value
> 255)
23379 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23380 _("invalid immediate: %ld is out of range"),
23383 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23386 case BFD_RELOC_ARM_THUMB_SHIFT
:
23387 /* 5bit shift value (0..32). LSL cannot take 32. */
23388 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
23389 temp
= newval
& 0xf800;
23390 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
23391 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23392 _("invalid shift value: %ld"), (long) value
);
23393 /* Shifts of zero must be encoded as LSL. */
23395 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
23396 /* Shifts of 32 are encoded as zero. */
23397 else if (value
== 32)
23399 newval
|= value
<< 6;
23400 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23403 case BFD_RELOC_VTABLE_INHERIT
:
23404 case BFD_RELOC_VTABLE_ENTRY
:
23408 case BFD_RELOC_ARM_MOVW
:
23409 case BFD_RELOC_ARM_MOVT
:
23410 case BFD_RELOC_ARM_THUMB_MOVW
:
23411 case BFD_RELOC_ARM_THUMB_MOVT
:
23412 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23414 /* REL format relocations are limited to a 16-bit addend. */
23415 if (!fixP
->fx_done
)
23417 if (value
< -0x8000 || value
> 0x7fff)
23418 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23419 _("offset out of range"));
23421 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
23422 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23427 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
23428 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23430 newval
= get_thumb32_insn (buf
);
23431 newval
&= 0xfbf08f00;
23432 newval
|= (value
& 0xf000) << 4;
23433 newval
|= (value
& 0x0800) << 15;
23434 newval
|= (value
& 0x0700) << 4;
23435 newval
|= (value
& 0x00ff);
23436 put_thumb32_insn (buf
, newval
);
23440 newval
= md_chars_to_number (buf
, 4);
23441 newval
&= 0xfff0f000;
23442 newval
|= value
& 0x0fff;
23443 newval
|= (value
& 0xf000) << 4;
23444 md_number_to_chars (buf
, newval
, 4);
23449 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23450 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23451 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23452 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23453 gas_assert (!fixP
->fx_done
);
23456 bfd_boolean is_mov
;
23457 bfd_vma encoded_addend
= value
;
23459 /* Check that addend can be encoded in instruction. */
23460 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
23461 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23462 _("the offset 0x%08lX is not representable"),
23463 (unsigned long) encoded_addend
);
23465 /* Extract the instruction. */
23466 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
23467 is_mov
= (insn
& 0xf800) == 0x2000;
23472 if (!seg
->use_rela_p
)
23473 insn
|= encoded_addend
;
23479 /* Extract the instruction. */
23480 /* Encoding is the following
23485 /* The following conditions must be true :
23490 rd
= (insn
>> 4) & 0xf;
23492 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
23493 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23494 _("Unable to process relocation for thumb opcode: %lx"),
23495 (unsigned long) insn
);
23497 /* Encode as ADD immediate8 thumb 1 code. */
23498 insn
= 0x3000 | (rd
<< 8);
23500 /* Place the encoded addend into the first 8 bits of the
23502 if (!seg
->use_rela_p
)
23503 insn
|= encoded_addend
;
23506 /* Update the instruction. */
23507 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
23511 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23512 case BFD_RELOC_ARM_ALU_PC_G0
:
23513 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23514 case BFD_RELOC_ARM_ALU_PC_G1
:
23515 case BFD_RELOC_ARM_ALU_PC_G2
:
23516 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23517 case BFD_RELOC_ARM_ALU_SB_G0
:
23518 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23519 case BFD_RELOC_ARM_ALU_SB_G1
:
23520 case BFD_RELOC_ARM_ALU_SB_G2
:
23521 gas_assert (!fixP
->fx_done
);
23522 if (!seg
->use_rela_p
)
23525 bfd_vma encoded_addend
;
23526 bfd_vma addend_abs
= abs (value
);
23528 /* Check that the absolute value of the addend can be
23529 expressed as an 8-bit constant plus a rotation. */
23530 encoded_addend
= encode_arm_immediate (addend_abs
);
23531 if (encoded_addend
== (unsigned int) FAIL
)
23532 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23533 _("the offset 0x%08lX is not representable"),
23534 (unsigned long) addend_abs
);
23536 /* Extract the instruction. */
23537 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23539 /* If the addend is positive, use an ADD instruction.
23540 Otherwise use a SUB. Take care not to destroy the S bit. */
23541 insn
&= 0xff1fffff;
23547 /* Place the encoded addend into the first 12 bits of the
23549 insn
&= 0xfffff000;
23550 insn
|= encoded_addend
;
23552 /* Update the instruction. */
23553 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23557 case BFD_RELOC_ARM_LDR_PC_G0
:
23558 case BFD_RELOC_ARM_LDR_PC_G1
:
23559 case BFD_RELOC_ARM_LDR_PC_G2
:
23560 case BFD_RELOC_ARM_LDR_SB_G0
:
23561 case BFD_RELOC_ARM_LDR_SB_G1
:
23562 case BFD_RELOC_ARM_LDR_SB_G2
:
23563 gas_assert (!fixP
->fx_done
);
23564 if (!seg
->use_rela_p
)
23567 bfd_vma addend_abs
= abs (value
);
23569 /* Check that the absolute value of the addend can be
23570 encoded in 12 bits. */
23571 if (addend_abs
>= 0x1000)
23572 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23573 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23574 (unsigned long) addend_abs
);
23576 /* Extract the instruction. */
23577 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23579 /* If the addend is negative, clear bit 23 of the instruction.
23580 Otherwise set it. */
23582 insn
&= ~(1 << 23);
23586 /* Place the absolute value of the addend into the first 12 bits
23587 of the instruction. */
23588 insn
&= 0xfffff000;
23589 insn
|= addend_abs
;
23591 /* Update the instruction. */
23592 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23596 case BFD_RELOC_ARM_LDRS_PC_G0
:
23597 case BFD_RELOC_ARM_LDRS_PC_G1
:
23598 case BFD_RELOC_ARM_LDRS_PC_G2
:
23599 case BFD_RELOC_ARM_LDRS_SB_G0
:
23600 case BFD_RELOC_ARM_LDRS_SB_G1
:
23601 case BFD_RELOC_ARM_LDRS_SB_G2
:
23602 gas_assert (!fixP
->fx_done
);
23603 if (!seg
->use_rela_p
)
23606 bfd_vma addend_abs
= abs (value
);
23608 /* Check that the absolute value of the addend can be
23609 encoded in 8 bits. */
23610 if (addend_abs
>= 0x100)
23611 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23612 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23613 (unsigned long) addend_abs
);
23615 /* Extract the instruction. */
23616 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23618 /* If the addend is negative, clear bit 23 of the instruction.
23619 Otherwise set it. */
23621 insn
&= ~(1 << 23);
23625 /* Place the first four bits of the absolute value of the addend
23626 into the first 4 bits of the instruction, and the remaining
23627 four into bits 8 .. 11. */
23628 insn
&= 0xfffff0f0;
23629 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
23631 /* Update the instruction. */
23632 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23636 case BFD_RELOC_ARM_LDC_PC_G0
:
23637 case BFD_RELOC_ARM_LDC_PC_G1
:
23638 case BFD_RELOC_ARM_LDC_PC_G2
:
23639 case BFD_RELOC_ARM_LDC_SB_G0
:
23640 case BFD_RELOC_ARM_LDC_SB_G1
:
23641 case BFD_RELOC_ARM_LDC_SB_G2
:
23642 gas_assert (!fixP
->fx_done
);
23643 if (!seg
->use_rela_p
)
23646 bfd_vma addend_abs
= abs (value
);
23648 /* Check that the absolute value of the addend is a multiple of
23649 four and, when divided by four, fits in 8 bits. */
23650 if (addend_abs
& 0x3)
23651 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23652 _("bad offset 0x%08lX (must be word-aligned)"),
23653 (unsigned long) addend_abs
);
23655 if ((addend_abs
>> 2) > 0xff)
23656 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23657 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23658 (unsigned long) addend_abs
);
23660 /* Extract the instruction. */
23661 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23663 /* If the addend is negative, clear bit 23 of the instruction.
23664 Otherwise set it. */
23666 insn
&= ~(1 << 23);
23670 /* Place the addend (divided by four) into the first eight
23671 bits of the instruction. */
23672 insn
&= 0xfffffff0;
23673 insn
|= addend_abs
>> 2;
23675 /* Update the instruction. */
23676 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23680 case BFD_RELOC_ARM_V4BX
:
23681 /* This will need to go in the object file. */
23685 case BFD_RELOC_UNUSED
:
23687 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23688 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
23692 /* Translate internal representation of relocation info to BFD target
23696 tc_gen_reloc (asection
*section
, fixS
*fixp
)
23699 bfd_reloc_code_real_type code
;
23701 reloc
= (arelent
*) xmalloc (sizeof (arelent
));
23703 reloc
->sym_ptr_ptr
= (asymbol
**) xmalloc (sizeof (asymbol
*));
23704 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
23705 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
23707 if (fixp
->fx_pcrel
)
23709 if (section
->use_rela_p
)
23710 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
23712 fixp
->fx_offset
= reloc
->address
;
23714 reloc
->addend
= fixp
->fx_offset
;
23716 switch (fixp
->fx_r_type
)
23719 if (fixp
->fx_pcrel
)
23721 code
= BFD_RELOC_8_PCREL
;
23726 if (fixp
->fx_pcrel
)
23728 code
= BFD_RELOC_16_PCREL
;
23733 if (fixp
->fx_pcrel
)
23735 code
= BFD_RELOC_32_PCREL
;
23739 case BFD_RELOC_ARM_MOVW
:
23740 if (fixp
->fx_pcrel
)
23742 code
= BFD_RELOC_ARM_MOVW_PCREL
;
23746 case BFD_RELOC_ARM_MOVT
:
23747 if (fixp
->fx_pcrel
)
23749 code
= BFD_RELOC_ARM_MOVT_PCREL
;
23753 case BFD_RELOC_ARM_THUMB_MOVW
:
23754 if (fixp
->fx_pcrel
)
23756 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
23760 case BFD_RELOC_ARM_THUMB_MOVT
:
23761 if (fixp
->fx_pcrel
)
23763 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
23767 case BFD_RELOC_NONE
:
23768 case BFD_RELOC_ARM_PCREL_BRANCH
:
23769 case BFD_RELOC_ARM_PCREL_BLX
:
23770 case BFD_RELOC_RVA
:
23771 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
23772 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
23773 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
23774 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23775 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23776 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23777 case BFD_RELOC_VTABLE_ENTRY
:
23778 case BFD_RELOC_VTABLE_INHERIT
:
23780 case BFD_RELOC_32_SECREL
:
23782 code
= fixp
->fx_r_type
;
23785 case BFD_RELOC_THUMB_PCREL_BLX
:
23787 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23788 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23791 code
= BFD_RELOC_THUMB_PCREL_BLX
;
23794 case BFD_RELOC_ARM_LITERAL
:
23795 case BFD_RELOC_ARM_HWLITERAL
:
23796 /* If this is called then the a literal has
23797 been referenced across a section boundary. */
23798 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23799 _("literal referenced across section boundary"));
23803 case BFD_RELOC_ARM_TLS_CALL
:
23804 case BFD_RELOC_ARM_THM_TLS_CALL
:
23805 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23806 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23807 case BFD_RELOC_ARM_GOT32
:
23808 case BFD_RELOC_ARM_GOTOFF
:
23809 case BFD_RELOC_ARM_GOT_PREL
:
23810 case BFD_RELOC_ARM_PLT32
:
23811 case BFD_RELOC_ARM_TARGET1
:
23812 case BFD_RELOC_ARM_ROSEGREL32
:
23813 case BFD_RELOC_ARM_SBREL32
:
23814 case BFD_RELOC_ARM_PREL31
:
23815 case BFD_RELOC_ARM_TARGET2
:
23816 case BFD_RELOC_ARM_TLS_LDO32
:
23817 case BFD_RELOC_ARM_PCREL_CALL
:
23818 case BFD_RELOC_ARM_PCREL_JUMP
:
23819 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23820 case BFD_RELOC_ARM_ALU_PC_G0
:
23821 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23822 case BFD_RELOC_ARM_ALU_PC_G1
:
23823 case BFD_RELOC_ARM_ALU_PC_G2
:
23824 case BFD_RELOC_ARM_LDR_PC_G0
:
23825 case BFD_RELOC_ARM_LDR_PC_G1
:
23826 case BFD_RELOC_ARM_LDR_PC_G2
:
23827 case BFD_RELOC_ARM_LDRS_PC_G0
:
23828 case BFD_RELOC_ARM_LDRS_PC_G1
:
23829 case BFD_RELOC_ARM_LDRS_PC_G2
:
23830 case BFD_RELOC_ARM_LDC_PC_G0
:
23831 case BFD_RELOC_ARM_LDC_PC_G1
:
23832 case BFD_RELOC_ARM_LDC_PC_G2
:
23833 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23834 case BFD_RELOC_ARM_ALU_SB_G0
:
23835 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23836 case BFD_RELOC_ARM_ALU_SB_G1
:
23837 case BFD_RELOC_ARM_ALU_SB_G2
:
23838 case BFD_RELOC_ARM_LDR_SB_G0
:
23839 case BFD_RELOC_ARM_LDR_SB_G1
:
23840 case BFD_RELOC_ARM_LDR_SB_G2
:
23841 case BFD_RELOC_ARM_LDRS_SB_G0
:
23842 case BFD_RELOC_ARM_LDRS_SB_G1
:
23843 case BFD_RELOC_ARM_LDRS_SB_G2
:
23844 case BFD_RELOC_ARM_LDC_SB_G0
:
23845 case BFD_RELOC_ARM_LDC_SB_G1
:
23846 case BFD_RELOC_ARM_LDC_SB_G2
:
23847 case BFD_RELOC_ARM_V4BX
:
23848 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23849 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23850 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23851 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23852 code
= fixp
->fx_r_type
;
23855 case BFD_RELOC_ARM_TLS_GOTDESC
:
23856 case BFD_RELOC_ARM_TLS_GD32
:
23857 case BFD_RELOC_ARM_TLS_LE32
:
23858 case BFD_RELOC_ARM_TLS_IE32
:
23859 case BFD_RELOC_ARM_TLS_LDM32
:
23860 /* BFD will include the symbol's address in the addend.
23861 But we don't want that, so subtract it out again here. */
23862 if (!S_IS_COMMON (fixp
->fx_addsy
))
23863 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
23864 code
= fixp
->fx_r_type
;
23868 case BFD_RELOC_ARM_IMMEDIATE
:
23869 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23870 _("internal relocation (type: IMMEDIATE) not fixed up"));
23873 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
23874 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23875 _("ADRL used for a symbol not defined in the same file"));
23878 case BFD_RELOC_ARM_OFFSET_IMM
:
23879 if (section
->use_rela_p
)
23881 code
= fixp
->fx_r_type
;
23885 if (fixp
->fx_addsy
!= NULL
23886 && !S_IS_DEFINED (fixp
->fx_addsy
)
23887 && S_IS_LOCAL (fixp
->fx_addsy
))
23889 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23890 _("undefined local label `%s'"),
23891 S_GET_NAME (fixp
->fx_addsy
));
23895 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23896 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
23903 switch (fixp
->fx_r_type
)
23905 case BFD_RELOC_NONE
: type
= "NONE"; break;
23906 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
23907 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
23908 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
23909 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
23910 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
23911 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
23912 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
23913 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
23914 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
23915 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
23916 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
23917 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
23918 default: type
= _("<unknown>"); break;
23920 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23921 _("cannot represent %s relocation in this object file format"),
23928 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
23930 && fixp
->fx_addsy
== GOT_symbol
)
23932 code
= BFD_RELOC_ARM_GOTPC
;
23933 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
23937 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
23939 if (reloc
->howto
== NULL
)
23941 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23942 _("cannot represent %s relocation in this object file format"),
23943 bfd_get_reloc_code_name (code
));
23947 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
23948 vtable entry to be used in the relocation's section offset. */
23949 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
23950 reloc
->address
= fixp
->fx_offset
;
23955 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23958 cons_fix_new_arm (fragS
* frag
,
23962 bfd_reloc_code_real_type reloc
)
23967 FIXME: @@ Should look at CPU word size. */
23971 reloc
= BFD_RELOC_8
;
23974 reloc
= BFD_RELOC_16
;
23978 reloc
= BFD_RELOC_32
;
23981 reloc
= BFD_RELOC_64
;
23986 if (exp
->X_op
== O_secrel
)
23988 exp
->X_op
= O_symbol
;
23989 reloc
= BFD_RELOC_32_SECREL
;
23993 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
#if defined (OBJ_COFF)
/* Redirect a Thumb BL to a non-Thumb function to that function's
   Thumb entry point, if one exists.  */

void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	*/
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
24016 arm_force_relocation (struct fix
* fixp
)
24018 #if defined (OBJ_COFF) && defined (TE_PE)
24019 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
24023 /* In case we have a call or a branch to a function in ARM ISA mode from
24024 a thumb function or vice-versa force the relocation. These relocations
24025 are cleared off for some cores that might have blx and simple transformations
24029 switch (fixp
->fx_r_type
)
24031 case BFD_RELOC_ARM_PCREL_JUMP
:
24032 case BFD_RELOC_ARM_PCREL_CALL
:
24033 case BFD_RELOC_THUMB_PCREL_BLX
:
24034 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
24038 case BFD_RELOC_ARM_PCREL_BLX
:
24039 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24040 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24041 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24042 if (ARM_IS_FUNC (fixp
->fx_addsy
))
24051 /* Resolve these relocations even if the symbol is extern or weak.
24052 Technically this is probably wrong due to symbol preemption.
24053 In practice these relocations do not have enough range to be useful
24054 at dynamic link time, and some code (e.g. in the Linux kernel)
24055 expects these references to be resolved. */
24056 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
24057 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
24058 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
24059 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
24060 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24061 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
24062 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
24063 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
24064 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24065 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
24066 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
24067 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
24068 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
24069 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
24072 /* Always leave these relocations for the linker. */
24073 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24074 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24075 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24078 /* Always generate relocations against function symbols. */
24079 if (fixp
->fx_r_type
== BFD_RELOC_32
24081 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
24084 return generic_force_reloc (fixp
);
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not: Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.	 */

/* Return TRUE if the fix may be reduced to a section-relative offset;
   FALSE when the original symbol must be preserved in the reloc.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return TRUE;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.	 */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24166 elf32_arm_target_format (void)
24169 return (target_big_endian
24170 ? "elf32-bigarm-symbian"
24171 : "elf32-littlearm-symbian");
24172 #elif defined (TE_VXWORKS)
24173 return (target_big_endian
24174 ? "elf32-bigarm-vxworks"
24175 : "elf32-littlearm-vxworks");
24176 #elif defined (TE_NACL)
24177 return (target_big_endian
24178 ? "elf32-bigarm-nacl"
24179 : "elf32-littlearm-nacl");
24181 if (target_big_endian
)
24182 return "elf32-bigarm";
24184 return "elf32-littlearm";
24189 armelf_frob_symbol (symbolS
* symp
,
24192 elf_frob_symbol (symp
, puntp
);
24196 /* MD interface: Finalization. */
24201 literal_pool
* pool
;
24203 /* Ensure that all the IT blocks are properly closed. */
24204 check_it_blocks_finished ();
24206 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
24208 /* Put it at the end of the relevant section. */
24209 subseg_set (pool
->section
, pool
->sub_section
);
24211 arm_elf_change_section ();
24218 /* Remove any excess mapping symbols generated for alignment frags in
24219 SEC. We may have created a mapping symbol before a zero byte
24220 alignment; remove it if there's a mapping symbol after the
24223 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
24224 void *dummy ATTRIBUTE_UNUSED
)
24226 segment_info_type
*seginfo
= seg_info (sec
);
24229 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
24232 for (fragp
= seginfo
->frchainP
->frch_root
;
24234 fragp
= fragp
->fr_next
)
24236 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
24237 fragS
*next
= fragp
->fr_next
;
24239 /* Variable-sized frags have been converted to fixed size by
24240 this point. But if this was variable-sized to start with,
24241 there will be a fixed-size frag after it. So don't handle
24243 if (sym
== NULL
|| next
== NULL
)
24246 if (S_GET_VALUE (sym
) < next
->fr_address
)
24247 /* Not at the end of this frag. */
24249 know (S_GET_VALUE (sym
) == next
->fr_address
);
24253 if (next
->tc_frag_data
.first_map
!= NULL
)
24255 /* Next frag starts with a mapping symbol. Discard this
24257 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24261 if (next
->fr_next
== NULL
)
24263 /* This mapping symbol is at the end of the section. Discard
24265 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
24266 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24270 /* As long as we have empty frags without any mapping symbols,
24272 /* If the next frag is non-empty and does not start with a
24273 mapping symbol, then this mapping symbol is required. */
24274 if (next
->fr_address
!= next
->fr_next
->fr_address
)
24277 next
= next
->fr_next
;
24279 while (next
!= NULL
);
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   normal ones.	 */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24366 /* MD interface: Initialization. */
24369 set_constant_flonums (void)
24373 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
24374 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
24378 /* Auto-select Thumb mode if it's the only available instruction set for the
24379 given architecture. */
24382 autoselect_thumb_from_cpu_variant (void)
24384 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
24385 opcode_select (16);
24394 if ( (arm_ops_hsh
= hash_new ()) == NULL
24395 || (arm_cond_hsh
= hash_new ()) == NULL
24396 || (arm_shift_hsh
= hash_new ()) == NULL
24397 || (arm_psr_hsh
= hash_new ()) == NULL
24398 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
24399 || (arm_reg_hsh
= hash_new ()) == NULL
24400 || (arm_reloc_hsh
= hash_new ()) == NULL
24401 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
24402 as_fatal (_("virtual memory exhausted"));
24404 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
24405 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
24406 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
24407 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
24408 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
24409 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
24410 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
24411 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
24412 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
24413 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
24414 (void *) (v7m_psrs
+ i
));
24415 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
24416 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
24418 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
24420 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
24421 (void *) (barrier_opt_names
+ i
));
24423 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
24425 struct reloc_entry
* entry
= reloc_names
+ i
;
24427 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
24428 /* This makes encode_branch() use the EABI versions of this relocation. */
24429 entry
->reloc
= BFD_RELOC_UNUSED
;
24431 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
24435 set_constant_flonums ();
24437 /* Set the cpu variant based on the command-line options. We prefer
24438 -mcpu= over -march= if both are set (as for GCC); and we prefer
24439 -mfpu= over any other way of setting the floating point unit.
24440 Use of legacy options with new options are faulted. */
24443 if (mcpu_cpu_opt
|| march_cpu_opt
)
24444 as_bad (_("use of old and new-style options to set CPU type"));
24446 mcpu_cpu_opt
= legacy_cpu
;
24448 else if (!mcpu_cpu_opt
)
24449 mcpu_cpu_opt
= march_cpu_opt
;
24454 as_bad (_("use of old and new-style options to set FPU type"));
24456 mfpu_opt
= legacy_fpu
;
24458 else if (!mfpu_opt
)
24460 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24461 || defined (TE_NetBSD) || defined (TE_VXWORKS))
24462 /* Some environments specify a default FPU. If they don't, infer it
24463 from the processor. */
24465 mfpu_opt
= mcpu_fpu_opt
;
24467 mfpu_opt
= march_fpu_opt
;
24469 mfpu_opt
= &fpu_default
;
24475 if (mcpu_cpu_opt
!= NULL
)
24476 mfpu_opt
= &fpu_default
;
24477 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
24478 mfpu_opt
= &fpu_arch_vfp_v2
;
24480 mfpu_opt
= &fpu_arch_fpa
;
24486 mcpu_cpu_opt
= &cpu_default
;
24487 selected_cpu
= cpu_default
;
24489 else if (no_cpu_selected ())
24490 selected_cpu
= cpu_default
;
24493 selected_cpu
= *mcpu_cpu_opt
;
24495 mcpu_cpu_opt
= &arm_arch_any
;
24498 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
24500 autoselect_thumb_from_cpu_variant ();
24502 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
24504 #if defined OBJ_COFF || defined OBJ_ELF
24506 unsigned int flags
= 0;
24508 #if defined OBJ_ELF
24509 flags
= meabi_flags
;
24511 switch (meabi_flags
)
24513 case EF_ARM_EABI_UNKNOWN
:
24515 /* Set the flags in the private structure. */
24516 if (uses_apcs_26
) flags
|= F_APCS26
;
24517 if (support_interwork
) flags
|= F_INTERWORK
;
24518 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
24519 if (pic_code
) flags
|= F_PIC
;
24520 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
24521 flags
|= F_SOFT_FLOAT
;
24523 switch (mfloat_abi_opt
)
24525 case ARM_FLOAT_ABI_SOFT
:
24526 case ARM_FLOAT_ABI_SOFTFP
:
24527 flags
|= F_SOFT_FLOAT
;
24530 case ARM_FLOAT_ABI_HARD
:
24531 if (flags
& F_SOFT_FLOAT
)
24532 as_bad (_("hard-float conflicts with specified fpu"));
24536 /* Using pure-endian doubles (even if soft-float). */
24537 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
24538 flags
|= F_VFP_FLOAT
;
24540 #if defined OBJ_ELF
24541 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
24542 flags
|= EF_ARM_MAVERICK_FLOAT
;
24545 case EF_ARM_EABI_VER4
:
24546 case EF_ARM_EABI_VER5
:
24547 /* No additional flags to set. */
24554 bfd_set_private_flags (stdoutput
, flags
);
24556 /* We have run out flags in the COFF header to encode the
24557 status of ATPCS support, so instead we create a dummy,
24558 empty, debug section called .arm.atpcs. */
24563 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
24567 bfd_set_section_flags
24568 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
24569 bfd_set_section_size (stdoutput
, sec
, 0);
24570 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
24576 /* Record the CPU type as well. */
24577 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
24578 mach
= bfd_mach_arm_iWMMXt2
;
24579 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
24580 mach
= bfd_mach_arm_iWMMXt
;
24581 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
24582 mach
= bfd_mach_arm_XScale
;
24583 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
24584 mach
= bfd_mach_arm_ep9312
;
24585 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
24586 mach
= bfd_mach_arm_5TE
;
24587 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
24589 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24590 mach
= bfd_mach_arm_5T
;
24592 mach
= bfd_mach_arm_5
;
24594 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
24596 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24597 mach
= bfd_mach_arm_4T
;
24599 mach
= bfd_mach_arm_4
;
24601 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
24602 mach
= bfd_mach_arm_3M
;
24603 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
24604 mach
= bfd_mach_arm_3
;
24605 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
24606 mach
= bfd_mach_arm_2a
;
24607 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
24608 mach
= bfd_mach_arm_2
;
24610 mach
= bfd_mach_arm_unknown
;
24612 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
24615 /* Command line processing. */
24618 Invocation line includes a switch not recognized by the base assembler.
24619 See if it's a processor-specific option.
24621 This routine is somewhat complicated by the need for backwards
24622 compatibility (since older releases of gcc can't be changed).
24623 The new options try to make the interface as compatible as
24626 New options (supported) are:
24628 -mcpu=<cpu name> Assemble for selected processor
24629 -march=<architecture name> Assemble for selected architecture
24630 -mfpu=<fpu architecture> Assemble for selected FPU.
24631 -EB/-mbig-endian Big-endian
24632 -EL/-mlittle-endian Little-endian
24633 -k Generate PIC code
24634 -mthumb Start in Thumb mode
24635 -mthumb-interwork Code supports ARM/Thumb interworking
24637 -m[no-]warn-deprecated Warn about deprecated features
24638 -m[no-]warn-syms Warn when symbols match instructions
24640 For now we will also provide support for:
24642 -mapcs-32 32-bit Program counter
24643 -mapcs-26 26-bit Program counter
24644 -mapcs-float Floats passed in FP registers
24645 -mapcs-reentrant Reentrant code
24647 (sometime these will probably be replaced with -mapcs=<list of options>
24648 and -matpcs=<list of options>)
24650 The remaining options are only supported for back-wards compatibility.
24651 Cpu variants, the arm part is optional:
24652 -m[arm]1 Currently not supported.
24653 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24654 -m[arm]3 Arm 3 processor
24655 -m[arm]6[xx], Arm 6 processors
24656 -m[arm]7[xx][t][[d]m] Arm 7 processors
24657 -m[arm]8[10] Arm 8 processors
24658 -m[arm]9[20][tdmi] Arm 9 processors
24659 -mstrongarm[110[0]] StrongARM processors
24660 -mxscale XScale processors
24661 -m[arm]v[2345[t[e]]] Arm architectures
24662 -mall All (except the ARM1)
24664 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24665 -mfpe-old (No float load/store multiples)
24666 -mvfpxd VFP Single precision
24668 -mno-fpu Disable all floating point instructions
24670 The following CPU names are recognized:
24671 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24672 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24673 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24674 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24675 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24676 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24677 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24681 const char * md_shortopts
= "m:k";
24683 #ifdef ARM_BI_ENDIAN
24684 #define OPTION_EB (OPTION_MD_BASE + 0)
24685 #define OPTION_EL (OPTION_MD_BASE + 1)
24687 #if TARGET_BYTES_BIG_ENDIAN
24688 #define OPTION_EB (OPTION_MD_BASE + 0)
24690 #define OPTION_EL (OPTION_MD_BASE + 1)
24693 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
24695 struct option md_longopts
[] =
24698 {"EB", no_argument
, NULL
, OPTION_EB
},
24701 {"EL", no_argument
, NULL
, OPTION_EL
},
24703 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
24704 {NULL
, no_argument
, NULL
, 0}
24708 size_t md_longopts_size
= sizeof (md_longopts
);
/* One entry per simple boolean-style command-line option.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.	*/
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
24719 struct arm_option_table arm_opts
[] =
24721 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
24722 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
24723 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24724 &support_interwork
, 1, NULL
},
24725 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
24726 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
24727 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
24729 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
24730 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
24731 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
24732 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
24735 /* These are recognized by the assembler, but have no affect on code. */
24736 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
24737 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
24739 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
24740 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24741 &warn_on_deprecated
, 0, NULL
},
24742 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
24743 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
24744 {NULL
, NULL
, NULL
, 0, NULL
}
24747 struct arm_legacy_option_table
24749 char *option
; /* Option name to match. */
24750 const arm_feature_set
**var
; /* Variable to change. */
24751 const arm_feature_set value
; /* What to change it to. */
24752 char *deprecated
; /* If non-null, print this message. */
24755 const struct arm_legacy_option_table arm_legacy_opts
[] =
24757 /* DON'T add any new processors to this list -- we want the whole list
24758 to go away... Add them to the processors table instead. */
24759 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24760 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24761 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24762 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24763 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24764 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24765 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24766 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24767 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24768 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24769 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24770 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24771 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24772 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24773 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24774 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24775 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24776 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24777 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24778 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24779 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24780 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24781 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24782 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24783 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24784 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24785 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24786 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24787 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24788 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24789 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24790 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24791 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24792 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24793 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24794 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24795 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24796 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24797 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24798 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24799 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24800 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24801 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
24802 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
24803 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
24804 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
24805 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24806 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24807 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24808 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24809 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
24810 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
24811 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
24812 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
24813 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
24814 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
24815 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
24816 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
24817 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
24818 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
24819 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
24820 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
24821 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
24822 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
24823 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
24824 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
24825 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
24826 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
24827 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
24828 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
24829 N_("use -mcpu=strongarm110")},
24830 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
24831 N_("use -mcpu=strongarm1100")},
24832 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
24833 N_("use -mcpu=strongarm1110")},
24834 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
24835 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
24836 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
24838 /* Architecture variants -- don't add any more to this list either. */
24839 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
24840 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
24841 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
24842 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
24843 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
24844 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
24845 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
24846 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
24847 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
24848 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
24849 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
24850 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
24851 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
24852 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
24853 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
24854 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
24855 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
24856 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
24858 /* Floating point variants -- don't add any more to this list either. */
24859 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
24860 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
24861 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
24862 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
24863 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
24865 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
24868 struct arm_cpu_option_table
24872 const arm_feature_set value
;
24873 /* For some CPUs we assume an FPU unless the user explicitly sets
24875 const arm_feature_set default_fpu
;
24876 /* The canonical name of the CPU, or NULL to use NAME converted to upper
24878 const char *canonical_name
;
24881 /* This list should, at a minimum, contain all the cpu names
24882 recognized by GCC. */
24883 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
24884 static const struct arm_cpu_option_table arm_cpus
[] =
24886 ARM_CPU_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
, NULL
),
24887 ARM_CPU_OPT ("arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
),
24888 ARM_CPU_OPT ("arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
),
24889 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
24890 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
24891 ARM_CPU_OPT ("arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24892 ARM_CPU_OPT ("arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24893 ARM_CPU_OPT ("arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24894 ARM_CPU_OPT ("arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24895 ARM_CPU_OPT ("arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24896 ARM_CPU_OPT ("arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24897 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24898 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24899 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24900 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24901 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24902 ARM_CPU_OPT ("arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24903 ARM_CPU_OPT ("arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24904 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24905 ARM_CPU_OPT ("arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24906 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24907 ARM_CPU_OPT ("arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24908 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24909 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24910 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24911 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24912 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24913 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24914 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24915 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24916 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24917 ARM_CPU_OPT ("arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24918 ARM_CPU_OPT ("arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24919 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24920 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24921 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24922 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24923 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24924 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24925 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"),
24926 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24927 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24928 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24929 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24930 ARM_CPU_OPT ("fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24931 ARM_CPU_OPT ("fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24932 /* For V5 or later processors we default to using VFP; but the user
24933 should really set the FPU type explicitly. */
24934 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24935 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24936 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
24937 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
24938 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
24939 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24940 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"),
24941 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24942 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24943 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"),
24944 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24945 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24946 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24947 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24948 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24949 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"),
24950 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24951 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24952 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24953 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
,
24955 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
24956 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24957 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24958 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24959 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24960 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24961 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"),
24962 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
),
24963 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
,
24965 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
),
24966 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, "MPCore"),
24967 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, "MPCore"),
24968 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
),
24969 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
),
24970 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6KZ
, FPU_NONE
, NULL
),
24971 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6KZ
, FPU_ARCH_VFP_V2
, NULL
),
24972 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC
,
24973 FPU_NONE
, "Cortex-A5"),
24974 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24976 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC
,
24977 ARM_FEATURE_COPROC (FPU_VFP_V3
24978 | FPU_NEON_EXT_V1
),
24980 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC
,
24981 ARM_FEATURE_COPROC (FPU_VFP_V3
24982 | FPU_NEON_EXT_V1
),
24984 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24986 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24988 ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24990 ARM_CPU_OPT ("cortex-a32", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24992 ARM_CPU_OPT ("cortex-a35", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24994 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24996 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24998 ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25000 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, "Cortex-R4"),
25001 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R
, FPU_ARCH_VFP_V3D16
,
25003 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV
,
25004 FPU_NONE
, "Cortex-R5"),
25005 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV
,
25006 FPU_ARCH_VFP_V3D16
,
25008 ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M7"),
25009 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M4"),
25010 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, "Cortex-M3"),
25011 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M1"),
25012 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0"),
25013 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0+"),
25014 ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25017 ARM_CPU_OPT ("qdf24xx", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25021 /* ??? XSCALE is really an architecture. */
25022 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
25023 /* ??? iwmmxt is not a processor. */
25024 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
),
25025 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
),
25026 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
25028 ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
25029 FPU_ARCH_MAVERICK
, "ARM920T"),
25030 /* Marvell processors. */
25031 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
25033 ARM_EXT2_V6T2_V8M
),
25034 FPU_ARCH_VFP_V3D16
, NULL
),
25035 ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
25037 ARM_EXT2_V6T2_V8M
),
25038 FPU_ARCH_NEON_VFP_V4
, NULL
),
25039 /* APM X-Gene family. */
25040 ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25042 ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25045 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
25049 struct arm_arch_option_table
25053 const arm_feature_set value
;
25054 const arm_feature_set default_fpu
;
25057 /* This list should, at a minimum, contain all the architecture names
25058 recognized by GCC. */
25059 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
25060 static const struct arm_arch_option_table arm_archs
[] =
25062 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
25063 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
25064 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
25065 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25066 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25067 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
25068 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
25069 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
25070 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
25071 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
25072 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
25073 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
25074 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
25075 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
25076 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
),
25077 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
),
25078 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
),
25079 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25080 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25081 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
),
25082 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
),
25083 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
25084 kept to preserve existing behaviour. */
25085 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25086 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25087 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
),
25088 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
),
25089 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
),
25090 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
25091 kept to preserve existing behaviour. */
25092 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25093 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25094 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
25095 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
25096 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
),
25097 /* The official spelling of the ARMv7 profile variants is the dashed form.
25098 Accept the non-dashed form for compatibility with old toolchains. */
25099 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25100 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
),
25101 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25102 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25103 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25104 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25105 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25106 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
),
25107 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
25108 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
),
25109 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
),
25110 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
),
25111 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
),
25112 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
25113 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
25114 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
),
25115 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25117 #undef ARM_ARCH_OPT
25119 /* ISA extensions in the co-processor and main instruction set space. */
25120 struct arm_option_extension_value_table
25124 const arm_feature_set merge_value
;
25125 const arm_feature_set clear_value
;
25126 const arm_feature_set allowed_archs
;
25129 /* The following table must be in alphabetical order with a NULL last entry.
25131 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
25132 static const struct arm_option_extension_value_table arm_extensions
[] =
25134 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25135 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25136 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25137 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
25138 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25139 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
25140 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25141 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25142 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25144 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25145 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25146 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
25147 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
25148 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ANY
),
25149 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
25150 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ANY
),
25151 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
25152 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ANY
),
25153 ARM_EXT_OPT ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25154 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25155 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
25156 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
25157 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
25158 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25159 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25160 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25161 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
25162 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
25163 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
25164 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25165 ARM_EXT_OPT ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25166 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25167 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V7A
)),
25168 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
25170 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
25171 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25172 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8
,
25173 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
25174 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25175 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
25176 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ANY
),
25177 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25181 /* ISA floating-point and Advanced SIMD extensions. */
25182 struct arm_option_fpu_value_table
25185 const arm_feature_set value
;
25188 /* This list should, at a minimum, contain all the fpu names
25189 recognized by GCC. */
/* -mfpu= option values: maps each accepted FPU name to the
   arm_feature_set recorded in mfpu_opt by arm_parse_fpu below.
   NOTE(review): this chunk is a lossy extraction -- the struct's
   braces and name/name-length members, the initializer's opening
   brace and the closing "};" are not visible here; verify against
   the complete gas/config/tc-arm.c before editing.  */
25190 static const struct arm_option_fpu_value_table arm_fpus
[] =
25192 {"softfpa", FPU_NONE
},
25193 {"fpe", FPU_ARCH_FPE
},
25194 {"fpe2", FPU_ARCH_FPE
},
25195 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
25196 {"fpa", FPU_ARCH_FPA
},
25197 {"fpa10", FPU_ARCH_FPA
},
25198 {"fpa11", FPU_ARCH_FPA
},
25199 {"arm7500fe", FPU_ARCH_FPA
},
25200 {"softvfp", FPU_ARCH_VFP
},
25201 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
25202 {"vfp", FPU_ARCH_VFP_V2
},
25203 {"vfp9", FPU_ARCH_VFP_V2
},
25204 {"vfp3", FPU_ARCH_VFP_V3
}, /* For backwards compatibility. */
25205 {"vfp10", FPU_ARCH_VFP_V2
},
25206 {"vfp10-r0", FPU_ARCH_VFP_V1
},
25207 {"vfpxd", FPU_ARCH_VFP_V1xD
},
25208 {"vfpv2", FPU_ARCH_VFP_V2
},
25209 {"vfpv3", FPU_ARCH_VFP_V3
},
25210 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
25211 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
25212 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
25213 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
25214 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
/* Several CPU names are accepted as FPU names for legacy reasons.  */
25215 {"arm1020t", FPU_ARCH_VFP_V1
},
25216 {"arm1020e", FPU_ARCH_VFP_V2
},
25217 {"arm1136jfs", FPU_ARCH_VFP_V2
},
25218 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
25219 {"maverick", FPU_ARCH_MAVERICK
},
25220 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
25221 {"neon-fp16", FPU_ARCH_NEON_FP16
},
25222 {"vfpv4", FPU_ARCH_VFP_V4
},
25223 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
25224 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
25225 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
25226 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
25227 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
25228 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
25229 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
25230 {"crypto-neon-fp-armv8",
25231 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
25232 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
25233 {"crypto-neon-fp-armv8.1",
25234 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
/* Sentinel terminating the linear scans in arm_parse_fpu.  */
25235 {NULL
, ARM_ARCH_NONE
}
/* Generic name -> integer-value pair used by the float-ABI and EABI
   option tables below.
   NOTE(review): lossy extraction -- the struct's braces and member
   declarations (orig. lines 25239-25243) are missing from this view;
   verify against the complete gas/config/tc-arm.c.  */
25238 struct arm_option_value_table
/* -mfloat-abi= option values: maps each ABI name to its
   ARM_FLOAT_ABI_* constant (consumed by arm_parse_float_abi below).
   NOTE(review): the initializer's opening brace, sentinel entry and
   closing "};" are not visible here.  */
25244 static const struct arm_option_value_table arm_float_abis
[] =
25246 {"hard", ARM_FLOAT_ABI_HARD
},
25247 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
25248 {"soft", ARM_FLOAT_ABI_SOFT
},
25253 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
/* -meabi= option values: maps a version string to the EF_ARM_EABI_*
   ELF header flag stored in meabi_flags (see arm_parse_eabi below).
   NOTE(review): lossy extraction -- the initializer's opening brace,
   sentinel entry and closing "};" are not visible here.  */
25254 static const struct arm_option_value_table arm_eabis
[] =
25256 {"gnu", EF_ARM_EABI_UNKNOWN
},
25257 {"4", EF_ARM_EABI_VER4
},
25258 {"5", EF_ARM_EABI_VER5
},
/* Descriptor for one prefix-style command-line option ("-mcpu=" etc.):
   the option substring to match, its --help text, the sub-option
   parser invoked with the rest of the argument, and an optional
   deprecation message.  Instantiated as arm_long_opts and consumed by
   md_parse_option / md_show_usage.
   NOTE(review): lossy extraction -- the struct's braces are missing
   from this view.  */
25263 struct arm_long_option_table
25265 char * option
; /* Substring to match. */
25266 char * help
; /* Help information. */
25267 int (* func
) (char * subopt
); /* Function to decode sub-option. */
25268 char * deprecated
; /* If non-null, print this message. */
25272 arm_parse_extension (char *str
, const arm_feature_set
**opt_p
)
25274 arm_feature_set
*ext_set
= (arm_feature_set
*)
25275 xmalloc (sizeof (arm_feature_set
));
25277 /* We insist on extensions being specified in alphabetical order, and with
25278 extensions being added before being removed. We achieve this by having
25279 the global ARM_EXTENSIONS table in alphabetical order, and using the
25280 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25281 or removing it (0) and only allowing it to change in the order
25283 const struct arm_option_extension_value_table
* opt
= NULL
;
25284 int adding_value
= -1;
25286 /* Copy the feature set, so that we can modify it. */
25287 *ext_set
= **opt_p
;
25290 while (str
!= NULL
&& *str
!= 0)
25297 as_bad (_("invalid architectural extension"));
25302 ext
= strchr (str
, '+');
25307 len
= strlen (str
);
25309 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
25311 if (adding_value
!= 0)
25314 opt
= arm_extensions
;
25322 if (adding_value
== -1)
25325 opt
= arm_extensions
;
25327 else if (adding_value
!= 1)
25329 as_bad (_("must specify extensions to add before specifying "
25330 "those to remove"));
25337 as_bad (_("missing architectural extension"));
25341 gas_assert (adding_value
!= -1);
25342 gas_assert (opt
!= NULL
);
25344 /* Scan over the options table trying to find an exact match. */
25345 for (; opt
->name
!= NULL
; opt
++)
25346 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25348 /* Check we can apply the extension to this architecture. */
25349 if (!ARM_CPU_HAS_FEATURE (*ext_set
, opt
->allowed_archs
))
25351 as_bad (_("extension does not apply to the base architecture"));
25355 /* Add or remove the extension. */
25357 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
25359 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
25364 if (opt
->name
== NULL
)
25366 /* Did we fail to find an extension because it wasn't specified in
25367 alphabetical order, or because it does not exist? */
25369 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
25370 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25373 if (opt
->name
== NULL
)
25374 as_bad (_("unknown architectural extension `%s'"), str
);
25376 as_bad (_("architectural extensions must be specified in "
25377 "alphabetical order"));
25383 /* We should skip the extension we've just matched the next time
25395 arm_parse_cpu (char *str
)
25397 const struct arm_cpu_option_table
*opt
;
25398 char *ext
= strchr (str
, '+');
25404 len
= strlen (str
);
25408 as_bad (_("missing cpu name `%s'"), str
);
25412 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
25413 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25415 mcpu_cpu_opt
= &opt
->value
;
25416 mcpu_fpu_opt
= &opt
->default_fpu
;
25417 if (opt
->canonical_name
)
25419 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
25420 strcpy (selected_cpu_name
, opt
->canonical_name
);
25426 if (len
>= sizeof selected_cpu_name
)
25427 len
= (sizeof selected_cpu_name
) - 1;
25429 for (i
= 0; i
< len
; i
++)
25430 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25431 selected_cpu_name
[i
] = 0;
25435 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
25440 as_bad (_("unknown cpu `%s'"), str
);
25445 arm_parse_arch (char *str
)
25447 const struct arm_arch_option_table
*opt
;
25448 char *ext
= strchr (str
, '+');
25454 len
= strlen (str
);
25458 as_bad (_("missing architecture name `%s'"), str
);
25462 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
25463 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25465 march_cpu_opt
= &opt
->value
;
25466 march_fpu_opt
= &opt
->default_fpu
;
25467 strcpy (selected_cpu_name
, opt
->name
);
25470 return arm_parse_extension (ext
, &march_cpu_opt
);
25475 as_bad (_("unknown architecture `%s'\n"), str
);
/* Handle -mfpu=STR: look STR up (exact match via streq) in the
   arm_fpus table and point mfpu_opt at the matching feature set; an
   unknown name is diagnosed with as_bad.
   NOTE(review): lossy extraction -- the return type, braces and
   return statements of this function are missing from this view;
   only the visible fragments are annotated.  */
25480 arm_parse_fpu (char * str
)
25482 const struct arm_option_fpu_value_table
* opt
;
/* Linear scan of the NULL-terminated arm_fpus table.  */
25484 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
25485 if (streq (opt
->name
, str
))
25487 mfpu_opt
= &opt
->value
;
/* Reached only when no table entry matched.  */
25491 as_bad (_("unknown floating point format `%s'\n"), str
);
/* Handle -mfloat-abi=STR: look STR up in arm_float_abis and record
   the matching ARM_FLOAT_ABI_* value in mfloat_abi_opt; diagnose an
   unknown name with as_bad.
   NOTE(review): lossy extraction -- return type, braces and return
   statements are missing from this view.  */
25496 arm_parse_float_abi (char * str
)
25498 const struct arm_option_value_table
* opt
;
/* Linear scan of the NULL-terminated arm_float_abis table.  */
25500 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
25501 if (streq (opt
->name
, str
))
25503 mfloat_abi_opt
= opt
->value
;
/* Reached only when no table entry matched.  */
25507 as_bad (_("unknown floating point abi `%s'\n"), str
);
/* Handle -meabi=STR: look STR up in arm_eabis and record the matching
   EF_ARM_EABI_* flag in meabi_flags; diagnose an unknown version with
   as_bad.
   NOTE(review): lossy extraction -- return type, braces and return
   statements are missing from this view.  */
25513 arm_parse_eabi (char * str
)
25515 const struct arm_option_value_table
*opt
;
/* Linear scan of the NULL-terminated arm_eabis table.  */
25517 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
25518 if (streq (opt
->name
, str
))
25520 meabi_flags
= opt
->value
;
/* Reached only when no table entry matched.  */
25523 as_bad (_("unknown EABI `%s'\n"), str
);
/* Handle -mimplicit-it=STR: set implicit_it_mode to the
   IMPLICIT_IT_MODE_* constant matching one of the four accepted
   keywords "arm", "thumb", "always" or "never"; any other string is
   diagnosed with as_bad.
   NOTE(review): lossy extraction -- the return type, braces, the
   "ret = FALSE" branch body and the final return are missing from
   this view.  */
25529 arm_parse_it_mode (char * str
)
25531 bfd_boolean ret
= TRUE
;
25533 if (streq ("arm", str
))
25534 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
25535 else if (streq ("thumb", str
))
25536 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
25537 else if (streq ("always", str
))
25538 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
25539 else if (streq ("never", str
))
25540 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
/* Fallthrough else-branch: reject any other mode keyword.  */
25543 as_bad (_("unknown implicit IT mode `%s', should be "\
25544 "arm, thumb, always, or never."), str
);
/* Handle -mccs (TI CodeComposer Studio compatibility): turn on
   codecomposer_syntax and switch the lexer tables so ';' starts a
   comment and no character separates statements on a line.
   NOTE(review): lossy extraction -- the return type, braces and
   return statement are missing from this view; the parameter is
   unused (ATTRIBUTE_UNUSED).  */
25552 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED
)
25554 codecomposer_syntax
= TRUE
;
25555 arm_comment_chars
[0] = ';';
25556 arm_line_separator_chars
[0] = 0;
/* The prefix-style ("long") options recognized by md_parse_option,
   each dispatching to one of the arm_parse_* handlers above with the
   text following the '=' (or option name).  All deprecation fields
   are NULL: none of these options is deprecated.
   NOTE(review): lossy extraction -- the initializer's opening brace
   and the closing "};" are not visible here.  */
25560 struct arm_long_option_table arm_long_opts
[] =
25562 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
25563 arm_parse_cpu
, NULL
},
25564 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
25565 arm_parse_arch
, NULL
},
25566 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
25567 arm_parse_fpu
, NULL
},
25568 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
25569 arm_parse_float_abi
, NULL
},
25571 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
25572 arm_parse_eabi
, NULL
},
25574 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
25575 arm_parse_it_mode
, NULL
},
25576 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
25577 arm_ccs_mode
, NULL
},
/* Sentinel terminating the scan in md_parse_option/md_show_usage.  */
25578 {NULL
, NULL
, 0, NULL
}
25582 md_parse_option (int c
, char * arg
)
25584 struct arm_option_table
*opt
;
25585 const struct arm_legacy_option_table
*fopt
;
25586 struct arm_long_option_table
*lopt
;
25592 target_big_endian
= 1;
25598 target_big_endian
= 0;
25602 case OPTION_FIX_V4BX
:
25607 /* Listing option. Just ignore these, we don't support additional
25612 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25614 if (c
== opt
->option
[0]
25615 && ((arg
== NULL
&& opt
->option
[1] == 0)
25616 || streq (arg
, opt
->option
+ 1)))
25618 /* If the option is deprecated, tell the user. */
25619 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
25620 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25621 arg
? arg
: "", _(opt
->deprecated
));
25623 if (opt
->var
!= NULL
)
25624 *opt
->var
= opt
->value
;
25630 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
25632 if (c
== fopt
->option
[0]
25633 && ((arg
== NULL
&& fopt
->option
[1] == 0)
25634 || streq (arg
, fopt
->option
+ 1)))
25636 /* If the option is deprecated, tell the user. */
25637 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
25638 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25639 arg
? arg
: "", _(fopt
->deprecated
));
25641 if (fopt
->var
!= NULL
)
25642 *fopt
->var
= &fopt
->value
;
25648 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25650 /* These options are expected to have an argument. */
25651 if (c
== lopt
->option
[0]
25653 && strncmp (arg
, lopt
->option
+ 1,
25654 strlen (lopt
->option
+ 1)) == 0)
25656 /* If the option is deprecated, tell the user. */
25657 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
25658 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
25659 _(lopt
->deprecated
));
25661 /* Call the sup-option parser. */
25662 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
25673 md_show_usage (FILE * fp
)
25675 struct arm_option_table
*opt
;
25676 struct arm_long_option_table
*lopt
;
25678 fprintf (fp
, _(" ARM-specific assembler options:\n"));
25680 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25681 if (opt
->help
!= NULL
)
25682 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
25684 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25685 if (lopt
->help
!= NULL
)
25686 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
25690 -EB assemble code for a big-endian cpu\n"));
25695 -EL assemble code for a little-endian cpu\n"));
25699 --fix-v4bx Allow BX in ARMv4 code\n"));
/* Tail of the cpu_arch_ver_table typedef: pairs an EABI Tag_CPU_arch
   value with the feature set that implies it.
   NOTE(review): lossy extraction -- the typedef's opening
   "typedef struct" and the integer value member are missing from
   this view.  */
25707 arm_feature_set flags
;
25708 } cpu_arch_ver_table
;
25710 /* Mapping from CPU features to EABI CPU arch values. As a general rule, table
25711 must be sorted least features first but some reordering is needed, e.g. for
25712 Thumb-2 instructions to be detected as coming from ARMv6T2. */
/* NOTE(review): entries for the earlier architecture versions
   (orig. lines 25714-25718 and 25721-25723) and the initializer's
   braces were dropped by the extraction; the visible rows below are
   byte-identical to the original.  */
25713 static const cpu_arch_ver_table cpu_arch_ver
[] =
25719 {4, ARM_ARCH_V5TE
},
25720 {5, ARM_ARCH_V5TEJ
},
25724 {11, ARM_ARCH_V6M
},
25725 {12, ARM_ARCH_V6SM
},
25726 {8, ARM_ARCH_V6T2
},
25727 {10, ARM_ARCH_V7VE
},
25728 {10, ARM_ARCH_V7R
},
25729 {10, ARM_ARCH_V7M
},
25730 {14, ARM_ARCH_V8A
},
25731 {16, ARM_ARCH_V8M_BASE
},
25732 {17, ARM_ARCH_V8M_MAIN
},
25736 /* Set an attribute if it has not already been set by the user. */
/* Record integer EABI object attribute TAG = VALUE via BFD, unless
   the user already set it explicitly (attributes_set_explicitly) --
   user-provided .eabi_attribute directives win over inferred ones.
   NOTE(review): lossy extraction -- the return type, braces and the
   opening of the guard condition (orig. line 25740) are missing from
   this view.  */
25738 aeabi_set_attribute_int (int tag
, int value
)
25741 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25742 || !attributes_set_explicitly
[tag
])
25743 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
/* String counterpart of aeabi_set_attribute_int: record EABI object
   attribute TAG = VALUE via BFD unless the user set it explicitly.
   NOTE(review): lossy extraction -- the return type, braces and the
   opening of the guard condition (orig. line 25749) are missing from
   this view.  */
25747 aeabi_set_attribute_string (int tag
, const char *value
)
25750 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25751 || !attributes_set_explicitly
[tag
])
25752 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
25755 /* Set the public EABI object attributes. */
25757 aeabi_set_public_attributes (void)
25762 int fp16_optional
= 0;
25763 arm_feature_set flags
;
25764 arm_feature_set tmp
;
25765 arm_feature_set arm_arch_v8m_base
= ARM_ARCH_V8M_BASE
;
25766 const cpu_arch_ver_table
*p
;
25768 /* Choose the architecture based on the capabilities of the requested cpu
25769 (if any) and/or the instructions actually used. */
25770 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
25771 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
25772 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
25774 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
25775 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
25777 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
25778 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
25780 selected_cpu
= flags
;
25782 /* Allow the user to override the reported architecture. */
25785 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
25786 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
25789 /* We need to make sure that the attributes do not identify us as v6S-M
25790 when the only v6S-M feature in use is the Operating System Extensions. */
25791 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_os
))
25792 if (!ARM_CPU_HAS_FEATURE (flags
, arm_arch_v6m_only
))
25793 ARM_CLEAR_FEATURE (flags
, flags
, arm_ext_os
);
25797 for (p
= cpu_arch_ver
; p
->val
; p
++)
25799 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
25802 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
25806 /* The table lookup above finds the last architecture to contribute
25807 a new feature. Unfortunately, Tag13 is a subset of the union of
25808 v6T2 and v7-M, so it is never seen as contributing a new feature.
25809 We can not search for the last entry which is entirely used,
25810 because if no CPU is specified we build up only those flags
25811 actually used. Perhaps we should separate out the specified
25812 and implicit cases. Avoid taking this path for -march=all by
25813 checking for contradictory v7-A / v7-M features. */
25814 if (arch
== TAG_CPU_ARCH_V7
25815 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
25816 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
25817 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
25818 arch
= TAG_CPU_ARCH_V7E_M
;
25820 ARM_CLEAR_FEATURE (tmp
, flags
, arm_arch_v8m_base
);
25821 if (arch
== TAG_CPU_ARCH_V8M_BASE
&& ARM_CPU_HAS_FEATURE (tmp
, arm_arch_any
))
25822 arch
= TAG_CPU_ARCH_V8M_MAIN
;
25824 /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
25825 coming from ARMv8-A. However, since ARMv8-A has more instructions than
25826 ARMv8-M, -march=all must be detected as ARMv8-A. */
25827 if (arch
== TAG_CPU_ARCH_V8M_MAIN
25828 && ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
25829 arch
= TAG_CPU_ARCH_V8
;
25831 /* Tag_CPU_name. */
25832 if (selected_cpu_name
[0])
25836 q
= selected_cpu_name
;
25837 if (strncmp (q
, "armv", 4) == 0)
25842 for (i
= 0; q
[i
]; i
++)
25843 q
[i
] = TOUPPER (q
[i
]);
25845 aeabi_set_attribute_string (Tag_CPU_name
, q
);
25848 /* Tag_CPU_arch. */
25849 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
25851 /* Tag_CPU_arch_profile. */
25852 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
25853 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25854 || (ARM_CPU_HAS_FEATURE (flags
, arm_ext_atomics
)
25855 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
)))
25857 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
25859 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
25864 if (profile
!= '\0')
25865 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
25867 /* Tag_ARM_ISA_use. */
25868 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
25870 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
25872 /* Tag_THUMB_ISA_use. */
25873 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
25878 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25879 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
25881 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
25885 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
25888 /* Tag_VFP_arch. */
25889 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
25890 aeabi_set_attribute_int (Tag_VFP_arch
,
25891 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
25893 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
25894 aeabi_set_attribute_int (Tag_VFP_arch
,
25895 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
25897 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
25900 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
25902 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
25904 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
25907 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
25908 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
25909 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
25910 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
25911 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
25913 /* Tag_ABI_HardFP_use. */
25914 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
25915 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
25916 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
25918 /* Tag_WMMX_arch. */
25919 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
25920 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
25921 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
25922 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
25924 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
25925 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
25926 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
25927 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
25929 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
25931 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
25935 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
25940 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
25941 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
25942 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
25946 We set Tag_DIV_use to two when integer divide instructions have been used
25947 in ARM state, or when Thumb integer divide instructions have been used,
25948 but we have no architecture profile set, nor have we any ARM instructions.
25950 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
25951 by the base architecture.
25953 For new architectures we will have to check these tests. */
25954 gas_assert (arch
<= TAG_CPU_ARCH_V8
25955 || (arch
>= TAG_CPU_ARCH_V8M_BASE
25956 && arch
<= TAG_CPU_ARCH_V8M_MAIN
));
25957 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25958 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
25959 aeabi_set_attribute_int (Tag_DIV_use
, 0);
25960 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
25961 || (profile
== '\0'
25962 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
25963 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
25964 aeabi_set_attribute_int (Tag_DIV_use
, 2);
25966 /* Tag_MP_extension_use. */
25967 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
25968 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
25970 /* Tag Virtualization_use. */
25971 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
25973 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
25976 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
25979 /* Add the default contents for the .ARM.attributes section. */
25983 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
25986 aeabi_set_public_attributes ();
25988 #endif /* OBJ_ELF */
25991 /* Parse a .cpu directive. */
25994 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
25996 const struct arm_cpu_option_table
*opt
;
26000 name
= input_line_pointer
;
26001 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26002 input_line_pointer
++;
26003 saved_char
= *input_line_pointer
;
26004 *input_line_pointer
= 0;
26006 /* Skip the first "all" entry. */
26007 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
26008 if (streq (opt
->name
, name
))
26010 mcpu_cpu_opt
= &opt
->value
;
26011 selected_cpu
= opt
->value
;
26012 if (opt
->canonical_name
)
26013 strcpy (selected_cpu_name
, opt
->canonical_name
);
26017 for (i
= 0; opt
->name
[i
]; i
++)
26018 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
26020 selected_cpu_name
[i
] = 0;
26022 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26023 *input_line_pointer
= saved_char
;
26024 demand_empty_rest_of_line ();
26027 as_bad (_("unknown cpu `%s'"), name
);
26028 *input_line_pointer
= saved_char
;
26029 ignore_rest_of_line ();
26033 /* Parse a .arch directive. */
26036 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
26038 const struct arm_arch_option_table
*opt
;
26042 name
= input_line_pointer
;
26043 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26044 input_line_pointer
++;
26045 saved_char
= *input_line_pointer
;
26046 *input_line_pointer
= 0;
26048 /* Skip the first "all" entry. */
26049 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26050 if (streq (opt
->name
, name
))
26052 mcpu_cpu_opt
= &opt
->value
;
26053 selected_cpu
= opt
->value
;
26054 strcpy (selected_cpu_name
, opt
->name
);
26055 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26056 *input_line_pointer
= saved_char
;
26057 demand_empty_rest_of_line ();
26061 as_bad (_("unknown architecture `%s'\n"), name
);
26062 *input_line_pointer
= saved_char
;
26063 ignore_rest_of_line ();
26067 /* Parse a .object_arch directive. */
26070 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
26072 const struct arm_arch_option_table
*opt
;
26076 name
= input_line_pointer
;
26077 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26078 input_line_pointer
++;
26079 saved_char
= *input_line_pointer
;
26080 *input_line_pointer
= 0;
26082 /* Skip the first "all" entry. */
26083 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26084 if (streq (opt
->name
, name
))
26086 object_arch
= &opt
->value
;
26087 *input_line_pointer
= saved_char
;
26088 demand_empty_rest_of_line ();
26092 as_bad (_("unknown architecture `%s'\n"), name
);
26093 *input_line_pointer
= saved_char
;
26094 ignore_rest_of_line ();
26097 /* Parse a .arch_extension directive. */
26100 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
26102 const struct arm_option_extension_value_table
*opt
;
26105 int adding_value
= 1;
26107 name
= input_line_pointer
;
26108 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26109 input_line_pointer
++;
26110 saved_char
= *input_line_pointer
;
26111 *input_line_pointer
= 0;
26113 if (strlen (name
) >= 2
26114 && strncmp (name
, "no", 2) == 0)
26120 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26121 if (streq (opt
->name
, name
))
26123 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt
, opt
->allowed_archs
))
26125 as_bad (_("architectural extension `%s' is not allowed for the "
26126 "current base architecture"), name
);
26131 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_cpu
,
26134 ARM_CLEAR_FEATURE (selected_cpu
, selected_cpu
, opt
->clear_value
);
26136 mcpu_cpu_opt
= &selected_cpu
;
26137 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26138 *input_line_pointer
= saved_char
;
26139 demand_empty_rest_of_line ();
26143 if (opt
->name
== NULL
)
26144 as_bad (_("unknown architecture extension `%s'\n"), name
);
26146 *input_line_pointer
= saved_char
;
26147 ignore_rest_of_line ();
26150 /* Parse a .fpu directive. */
26153 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
26155 const struct arm_option_fpu_value_table
*opt
;
26159 name
= input_line_pointer
;
26160 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26161 input_line_pointer
++;
26162 saved_char
= *input_line_pointer
;
26163 *input_line_pointer
= 0;
26165 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
26166 if (streq (opt
->name
, name
))
26168 mfpu_opt
= &opt
->value
;
26169 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26170 *input_line_pointer
= saved_char
;
26171 demand_empty_rest_of_line ();
26175 as_bad (_("unknown floating point format `%s'\n"), name
);
26176 *input_line_pointer
= saved_char
;
26177 ignore_rest_of_line ();
26180 /* Copy symbol information. */
26183 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
26185 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
26189 /* Given a symbolic attribute NAME, return the proper integer value.
26190 Returns -1 if the attribute is not known. */
26193 arm_convert_symbolic_attribute (const char *name
)
26195 static const struct
26200 attribute_table
[] =
26202 /* When you modify this table you should
26203 also modify the list in doc/c-arm.texi. */
26204 #define T(tag) {#tag, tag}
26205 T (Tag_CPU_raw_name
),
26208 T (Tag_CPU_arch_profile
),
26209 T (Tag_ARM_ISA_use
),
26210 T (Tag_THUMB_ISA_use
),
26214 T (Tag_Advanced_SIMD_arch
),
26215 T (Tag_PCS_config
),
26216 T (Tag_ABI_PCS_R9_use
),
26217 T (Tag_ABI_PCS_RW_data
),
26218 T (Tag_ABI_PCS_RO_data
),
26219 T (Tag_ABI_PCS_GOT_use
),
26220 T (Tag_ABI_PCS_wchar_t
),
26221 T (Tag_ABI_FP_rounding
),
26222 T (Tag_ABI_FP_denormal
),
26223 T (Tag_ABI_FP_exceptions
),
26224 T (Tag_ABI_FP_user_exceptions
),
26225 T (Tag_ABI_FP_number_model
),
26226 T (Tag_ABI_align_needed
),
26227 T (Tag_ABI_align8_needed
),
26228 T (Tag_ABI_align_preserved
),
26229 T (Tag_ABI_align8_preserved
),
26230 T (Tag_ABI_enum_size
),
26231 T (Tag_ABI_HardFP_use
),
26232 T (Tag_ABI_VFP_args
),
26233 T (Tag_ABI_WMMX_args
),
26234 T (Tag_ABI_optimization_goals
),
26235 T (Tag_ABI_FP_optimization_goals
),
26236 T (Tag_compatibility
),
26237 T (Tag_CPU_unaligned_access
),
26238 T (Tag_FP_HP_extension
),
26239 T (Tag_VFP_HP_extension
),
26240 T (Tag_ABI_FP_16bit_format
),
26241 T (Tag_MPextension_use
),
26243 T (Tag_nodefaults
),
26244 T (Tag_also_compatible_with
),
26245 T (Tag_conformance
),
26247 T (Tag_Virtualization_use
),
26248 /* We deliberately do not include Tag_MPextension_use_legacy. */
26256 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
26257 if (streq (name
, attribute_table
[i
].name
))
26258 return attribute_table
[i
].tag
;
26264 /* Apply sym value for relocations only in the case that they are for
26265 local symbols in the same segment as the fixup and you have the
26266 respective architectural feature for blx and simple switches. */
26268 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
26271 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
26272 /* PR 17444: If the local symbol is in a different section then a reloc
26273 will always be generated for it, so applying the symbol value now
26274 will result in a double offset being stored in the relocation. */
26275 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
26276 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
26278 switch (fixP
->fx_r_type
)
26280 case BFD_RELOC_ARM_PCREL_BLX
:
26281 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26282 if (ARM_IS_FUNC (fixP
->fx_addsy
))
26286 case BFD_RELOC_ARM_PCREL_CALL
:
26287 case BFD_RELOC_THUMB_PCREL_BLX
:
26288 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
26299 #endif /* OBJ_ELF */