1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option) any later version.
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
78 /* Whether --fdpic was given. */
83 /* Results from operand parsing worker functions. */
87 PARSE_OPERAND_SUCCESS
,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result
;
99 /* Types of processor to assemble for. */
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
105 If you have a target that requires a default CPU option then you
106 should define CPU_DEFAULT here. */
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
125 #endif /* ifndef FPU_DEFAULT */
127 #define streq(a, b) (strcmp (a, b) == 0)
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant
;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used
;
136 static arm_feature_set thumb_arch_used
;
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26
= FALSE
;
140 static int atpcs
= FALSE
;
141 static int support_interwork
= FALSE
;
142 static int uses_apcs_float
= FALSE
;
143 static int pic_code
= FALSE
;
144 static int fix_v4bx
= FALSE
;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated
= TRUE
;
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax
= FALSE
;
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set
*legacy_cpu
= NULL
;
158 static const arm_feature_set
*legacy_fpu
= NULL
;
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
162 static arm_feature_set
*mcpu_ext_opt
= NULL
;
163 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set
*march_cpu_opt
= NULL
;
167 static arm_feature_set
*march_ext_opt
= NULL
;
168 static const arm_feature_set
*march_fpu_opt
= NULL
;
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set
*mfpu_opt
= NULL
;
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
176 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
179 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
180 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
182 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
184 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
187 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
190 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
191 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2
);
192 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
193 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
194 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
195 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
196 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
197 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
198 static const arm_feature_set arm_ext_v4t_5
=
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
200 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
201 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
202 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
203 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
204 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
205 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
206 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
207 /* Only for compatibility of hint instructions. */
208 static const arm_feature_set arm_ext_v6k_v6t2
=
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V6T2
);
210 static const arm_feature_set arm_ext_v6_notm
=
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
212 static const arm_feature_set arm_ext_v6_dsp
=
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
214 static const arm_feature_set arm_ext_barrier
=
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
216 static const arm_feature_set arm_ext_msr
=
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
218 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
219 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
220 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
221 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
225 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
226 static const arm_feature_set arm_ext_m
=
227 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_V7M
,
228 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
229 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
230 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
231 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
232 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
233 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
234 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
235 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
236 static const arm_feature_set arm_ext_v8m_main
=
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
238 static const arm_feature_set arm_ext_v8_1m_main
=
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN
);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only
=
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
243 static const arm_feature_set arm_ext_v6t2_v8m
=
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics
=
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp
=
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
253 static const arm_feature_set arm_ext_ras
=
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16
=
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
258 static const arm_feature_set arm_ext_fp16_fml
=
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML
);
260 static const arm_feature_set arm_ext_v8_2
=
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A
);
262 static const arm_feature_set arm_ext_v8_3
=
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A
);
264 static const arm_feature_set arm_ext_sb
=
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
);
266 static const arm_feature_set arm_ext_predres
=
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
);
269 static const arm_feature_set arm_arch_any
= ARM_ANY
;
271 static const arm_feature_set fpu_any
= FPU_ANY
;
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
275 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
277 static const arm_feature_set arm_cext_iwmmxt2
=
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
279 static const arm_feature_set arm_cext_iwmmxt
=
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
281 static const arm_feature_set arm_cext_xscale
=
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
283 static const arm_feature_set arm_cext_maverick
=
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
285 static const arm_feature_set fpu_fpa_ext_v1
=
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
287 static const arm_feature_set fpu_fpa_ext_v2
=
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
289 static const arm_feature_set fpu_vfp_ext_v1xd
=
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
291 static const arm_feature_set fpu_vfp_ext_v1
=
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
293 static const arm_feature_set fpu_vfp_ext_v2
=
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
295 static const arm_feature_set fpu_vfp_ext_v3xd
=
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
297 static const arm_feature_set fpu_vfp_ext_v3
=
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
299 static const arm_feature_set fpu_vfp_ext_d32
=
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
301 static const arm_feature_set fpu_neon_ext_v1
=
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
306 static const arm_feature_set fpu_vfp_fp16
=
307 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
308 static const arm_feature_set fpu_neon_ext_fma
=
309 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
311 static const arm_feature_set fpu_vfp_ext_fma
=
312 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
313 static const arm_feature_set fpu_vfp_ext_armv8
=
314 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
315 static const arm_feature_set fpu_vfp_ext_armv8xd
=
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
317 static const arm_feature_set fpu_neon_ext_armv8
=
318 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
319 static const arm_feature_set fpu_crypto_ext_armv8
=
320 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
321 static const arm_feature_set crc_ext_armv8
=
322 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
323 static const arm_feature_set fpu_neon_ext_v8_1
=
324 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
325 static const arm_feature_set fpu_neon_ext_dotprod
=
326 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
);
328 static int mfloat_abi_opt
= -1;
329 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
331 static arm_feature_set selected_arch
= ARM_ARCH_NONE
;
332 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
334 static arm_feature_set selected_ext
= ARM_ARCH_NONE
;
335 /* Feature bits selected by the last -mcpu/-march or by the combination of the
336 last .cpu/.arch directive .arch_extension directives since that
338 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
339 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
340 static arm_feature_set selected_fpu
= FPU_NONE
;
341 /* Feature bits selected by the last .object_arch directive. */
342 static arm_feature_set selected_object_arch
= ARM_ARCH_NONE
;
343 /* Must be long enough to hold any of the names in arm_cpus. */
344 static char selected_cpu_name
[20];
346 extern FLONUM_TYPE generic_floating_point_number
;
348 /* Return if no cpu was selected on command-line. */
350 no_cpu_selected (void)
352 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
357 static int meabi_flags
= EABI_DEFAULT
;
359 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
362 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
367 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
372 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
373 symbolS
* GOT_symbol
;
376 /* 0: assemble for ARM,
377 1: assemble for Thumb,
378 2: assemble for Thumb even though target CPU does not support thumb
380 static int thumb_mode
= 0;
381 /* A value distinct from the possible values for thumb_mode that we
382 can use to record whether thumb_mode has been copied into the
383 tc_frag_data field of a frag. */
384 #define MODE_RECORDED (1 << 4)
386 /* Specifies the intrinsic IT insn behavior mode. */
387 enum implicit_it_mode
389 IMPLICIT_IT_MODE_NEVER
= 0x00,
390 IMPLICIT_IT_MODE_ARM
= 0x01,
391 IMPLICIT_IT_MODE_THUMB
= 0x02,
392 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
394 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
396 /* If unified_syntax is true, we are processing the new unified
397 ARM/Thumb syntax. Important differences from the old ARM mode:
399 - Immediate operands do not require a # prefix.
400 - Conditional affixes always appear at the end of the
401 instruction. (For backward compatibility, those instructions
402 that formerly had them in the middle, continue to accept them
404 - The IT instruction may appear, and if it does is validated
405 against subsequent conditional affixes. It does not generate
408 Important differences from the old Thumb mode:
410 - Immediate operands do not require a # prefix.
411 - Most of the V6T2 instructions are only available in unified mode.
412 - The .N and .W suffixes are recognized and honored (it is an error
413 if they cannot be honored).
414 - All instructions set the flags if and only if they have an 's' affix.
415 - Conditional affixes may be used. They are validated against
416 preceding IT instructions. Unlike ARM mode, you cannot use a
417 conditional affix except in the scope of an IT instruction. */
419 static bfd_boolean unified_syntax
= FALSE
;
421 /* An immediate operand can start with #, and ld*, st*, pld operands
422 can contain [ and ]. We need to tell APP not to elide whitespace
423 before a [, which can appear as the first operand for pld.
424 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
425 const char arm_symbol_chars
[] = "#[]{}";
440 enum neon_el_type type
;
444 #define NEON_MAX_TYPE_ELS 4
448 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
452 enum it_instruction_type
457 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
458 if inside, should be the last one. */
459 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
460 i.e. BKPT and NOP. */
461 IT_INSN
/* The IT insn has been parsed. */
464 /* The maximum number of operands we need. */
465 #define ARM_IT_MAX_OPERANDS 6
466 #define ARM_IT_MAX_RELOCS 3
471 unsigned long instruction
;
475 /* "uncond_value" is set to the value in place of the conditional field in
476 unconditional versions of the instruction, or -1 if nothing is
479 struct neon_type vectype
;
480 /* This does not indicate an actual NEON instruction, only that
481 the mnemonic accepts neon-style type suffixes. */
483 /* Set to the opcode if the instruction needs relaxation.
484 Zero if the instruction is not relaxed. */
488 bfd_reloc_code_real_type type
;
491 } relocs
[ARM_IT_MAX_RELOCS
];
493 enum it_instruction_type it_insn_type
;
499 struct neon_type_el vectype
;
500 unsigned present
: 1; /* Operand present. */
501 unsigned isreg
: 1; /* Operand was a register. */
502 unsigned immisreg
: 1; /* .imm field is a second register. */
503 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
504 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
505 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
506 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
507 instructions. This allows us to disambiguate ARM <-> vector insns. */
508 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
509 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
510 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
511 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
512 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
513 unsigned writeback
: 1; /* Operand has trailing ! */
514 unsigned preind
: 1; /* Preindexed address. */
515 unsigned postind
: 1; /* Postindexed address. */
516 unsigned negative
: 1; /* Index register was negated. */
517 unsigned shifted
: 1; /* Shift applied to operation. */
518 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
519 } operands
[ARM_IT_MAX_OPERANDS
];
522 static struct arm_it inst
;
524 #define NUM_FLOAT_VALS 8
526 const char * fp_const
[] =
528 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
531 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
541 #define CP_T_X 0x00008000
542 #define CP_T_Y 0x00400000
544 #define CONDS_BIT 0x00100000
545 #define LOAD_BIT 0x00100000
547 #define DOUBLE_LOAD_FLAG 0x00000001
551 const char * template_name
;
555 #define COND_ALWAYS 0xE
559 const char * template_name
;
563 struct asm_barrier_opt
565 const char * template_name
;
567 const arm_feature_set arch
;
570 /* The bit that distinguishes CPSR and SPSR. */
571 #define SPSR_BIT (1 << 22)
573 /* The individual PSR flag bits. */
574 #define PSR_c (1 << 16)
575 #define PSR_x (1 << 17)
576 #define PSR_s (1 << 18)
577 #define PSR_f (1 << 19)
582 bfd_reloc_code_real_type reloc
;
587 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
588 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
593 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
596 /* Bits for DEFINED field in neon_typed_alias. */
597 #define NTA_HASTYPE 1
598 #define NTA_HASINDEX 2
600 struct neon_typed_alias
602 unsigned char defined
;
604 struct neon_type_el eltype
;
607 /* ARM register categories. This includes coprocessor numbers and various
608 architecture extensions' registers. Each entry should have an error message
609 in reg_expected_msgs below. */
637 /* Structure for a hash table entry for a register.
638 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
639 information which states whether a vector type or index is specified (for a
640 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
646 unsigned char builtin
;
647 struct neon_typed_alias
* neon
;
650 /* Diagnostics used when we don't get a register of the expected type. */
651 const char * const reg_expected_msgs
[] =
653 [REG_TYPE_RN
] = N_("ARM register expected"),
654 [REG_TYPE_CP
] = N_("bad or missing co-processor number"),
655 [REG_TYPE_CN
] = N_("co-processor register expected"),
656 [REG_TYPE_FN
] = N_("FPA register expected"),
657 [REG_TYPE_VFS
] = N_("VFP single precision register expected"),
658 [REG_TYPE_VFD
] = N_("VFP/Neon double precision register expected"),
659 [REG_TYPE_NQ
] = N_("Neon quad precision register expected"),
660 [REG_TYPE_VFSD
] = N_("VFP single or double precision register expected"),
661 [REG_TYPE_NDQ
] = N_("Neon double or quad precision register expected"),
662 [REG_TYPE_NSD
] = N_("Neon single or double precision register expected"),
663 [REG_TYPE_NSDQ
] = N_("VFP single, double or Neon quad precision register"
665 [REG_TYPE_VFC
] = N_("VFP system register expected"),
666 [REG_TYPE_MVF
] = N_("Maverick MVF register expected"),
667 [REG_TYPE_MVD
] = N_("Maverick MVD register expected"),
668 [REG_TYPE_MVFX
] = N_("Maverick MVFX register expected"),
669 [REG_TYPE_MVDX
] = N_("Maverick MVDX register expected"),
670 [REG_TYPE_MVAX
] = N_("Maverick MVAX register expected"),
671 [REG_TYPE_DSPSC
] = N_("Maverick DSPSC register expected"),
672 [REG_TYPE_MMXWR
] = N_("iWMMXt data register expected"),
673 [REG_TYPE_MMXWC
] = N_("iWMMXt control register expected"),
674 [REG_TYPE_MMXWCG
] = N_("iWMMXt scalar register expected"),
675 [REG_TYPE_XSCALE
] = N_("XScale accumulator register expected"),
676 [REG_TYPE_RNB
] = N_("")
679 /* Some well known registers that we refer to directly elsewhere. */
685 /* ARM instructions take 4bytes in the object file, Thumb instructions
691 /* Basic string to match. */
692 const char * template_name
;
694 /* Parameters to instruction. */
695 unsigned int operands
[8];
697 /* Conditional tag - see opcode_lookup. */
698 unsigned int tag
: 4;
700 /* Basic instruction code. */
701 unsigned int avalue
: 28;
703 /* Thumb-format instruction code. */
706 /* Which architecture variant provides this instruction. */
707 const arm_feature_set
* avariant
;
708 const arm_feature_set
* tvariant
;
710 /* Function to call to encode instruction in ARM format. */
711 void (* aencode
) (void);
713 /* Function to call to encode instruction in Thumb format. */
714 void (* tencode
) (void);
717 /* Defines for various bits that we will want to toggle. */
718 #define INST_IMMEDIATE 0x02000000
719 #define OFFSET_REG 0x02000000
720 #define HWOFFSET_IMM 0x00400000
721 #define SHIFT_BY_REG 0x00000010
722 #define PRE_INDEX 0x01000000
723 #define INDEX_UP 0x00800000
724 #define WRITE_BACK 0x00200000
725 #define LDM_TYPE_2_OR_3 0x00400000
726 #define CPSI_MMOD 0x00020000
728 #define LITERAL_MASK 0xf000f000
729 #define OPCODE_MASK 0xfe1fffff
730 #define V4_STR_BIT 0x00000020
731 #define VLDR_VMOV_SAME 0x0040f000
733 #define T2_SUBS_PC_LR 0xf3de8f00
735 #define DATA_OP_SHIFT 21
736 #define SBIT_SHIFT 20
738 #define T2_OPCODE_MASK 0xfe1fffff
739 #define T2_DATA_OP_SHIFT 21
740 #define T2_SBIT_SHIFT 20
742 #define A_COND_MASK 0xf0000000
743 #define A_PUSH_POP_OP_MASK 0x0fff0000
745 /* Opcodes for pushing/popping registers to/from the stack. */
746 #define A1_OPCODE_PUSH 0x092d0000
747 #define A2_OPCODE_PUSH 0x052d0004
748 #define A2_OPCODE_POP 0x049d0004
750 /* Codes to distinguish the arithmetic instructions. */
761 #define OPCODE_CMP 10
762 #define OPCODE_CMN 11
763 #define OPCODE_ORR 12
764 #define OPCODE_MOV 13
765 #define OPCODE_BIC 14
766 #define OPCODE_MVN 15
768 #define T2_OPCODE_AND 0
769 #define T2_OPCODE_BIC 1
770 #define T2_OPCODE_ORR 2
771 #define T2_OPCODE_ORN 3
772 #define T2_OPCODE_EOR 4
773 #define T2_OPCODE_ADD 8
774 #define T2_OPCODE_ADC 10
775 #define T2_OPCODE_SBC 11
776 #define T2_OPCODE_SUB 13
777 #define T2_OPCODE_RSB 14
779 #define T_OPCODE_MUL 0x4340
780 #define T_OPCODE_TST 0x4200
781 #define T_OPCODE_CMN 0x42c0
782 #define T_OPCODE_NEG 0x4240
783 #define T_OPCODE_MVN 0x43c0
785 #define T_OPCODE_ADD_R3 0x1800
786 #define T_OPCODE_SUB_R3 0x1a00
787 #define T_OPCODE_ADD_HI 0x4400
788 #define T_OPCODE_ADD_ST 0xb000
789 #define T_OPCODE_SUB_ST 0xb080
790 #define T_OPCODE_ADD_SP 0xa800
791 #define T_OPCODE_ADD_PC 0xa000
792 #define T_OPCODE_ADD_I8 0x3000
793 #define T_OPCODE_SUB_I8 0x3800
794 #define T_OPCODE_ADD_I3 0x1c00
795 #define T_OPCODE_SUB_I3 0x1e00
797 #define T_OPCODE_ASR_R 0x4100
798 #define T_OPCODE_LSL_R 0x4080
799 #define T_OPCODE_LSR_R 0x40c0
800 #define T_OPCODE_ROR_R 0x41c0
801 #define T_OPCODE_ASR_I 0x1000
802 #define T_OPCODE_LSL_I 0x0000
803 #define T_OPCODE_LSR_I 0x0800
805 #define T_OPCODE_MOV_I8 0x2000
806 #define T_OPCODE_CMP_I8 0x2800
807 #define T_OPCODE_CMP_LR 0x4280
808 #define T_OPCODE_MOV_HR 0x4600
809 #define T_OPCODE_CMP_HR 0x4500
811 #define T_OPCODE_LDR_PC 0x4800
812 #define T_OPCODE_LDR_SP 0x9800
813 #define T_OPCODE_STR_SP 0x9000
814 #define T_OPCODE_LDR_IW 0x6800
815 #define T_OPCODE_STR_IW 0x6000
816 #define T_OPCODE_LDR_IH 0x8800
817 #define T_OPCODE_STR_IH 0x8000
818 #define T_OPCODE_LDR_IB 0x7800
819 #define T_OPCODE_STR_IB 0x7000
820 #define T_OPCODE_LDR_RW 0x5800
821 #define T_OPCODE_STR_RW 0x5000
822 #define T_OPCODE_LDR_RH 0x5a00
823 #define T_OPCODE_STR_RH 0x5200
824 #define T_OPCODE_LDR_RB 0x5c00
825 #define T_OPCODE_STR_RB 0x5400
827 #define T_OPCODE_PUSH 0xb400
828 #define T_OPCODE_POP 0xbc00
830 #define T_OPCODE_BRANCH 0xe000
832 #define THUMB_SIZE 2 /* Size of thumb instruction. */
833 #define THUMB_PP_PC_LR 0x0100
834 #define THUMB_LOAD_BIT 0x0800
835 #define THUMB2_LOAD_BIT 0x00100000
/* Diagnostic messages emitted when an instruction is rejected.  Each macro
   expands to a translatable string, suitable for assigning to inst.error.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Fixed: the expansion previously ended in a stray ';', which silently
   injected an empty statement at every use and made the macro a syntax
   error in expression position (e.g. inside a ?: or an if/else branch).  */
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF	_("branch out of range or not a multiple of 2")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
862 static struct hash_control
* arm_ops_hsh
;
863 static struct hash_control
* arm_cond_hsh
;
864 static struct hash_control
* arm_shift_hsh
;
865 static struct hash_control
* arm_psr_hsh
;
866 static struct hash_control
* arm_v7m_psr_hsh
;
867 static struct hash_control
* arm_reg_hsh
;
868 static struct hash_control
* arm_reloc_hsh
;
869 static struct hash_control
* arm_barrier_opt_hsh
;
871 /* Stuff needed to resolve the label ambiguity
880 symbolS
* last_label_seen
;
881 static int label_is_thumb_function_name
= FALSE
;
883 /* Literal pool structure. Held on a per-section
884 and per-sub-section basis. */
886 #define MAX_LITERAL_POOL_SIZE 1024
887 typedef struct literal_pool
889 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
890 unsigned int next_free_entry
;
896 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
898 struct literal_pool
* next
;
899 unsigned int alignment
;
902 /* Pointer to a linked list of literal pools. */
903 literal_pool
* list_of_pools
= NULL
;
905 typedef enum asmfunc_states
908 WAITING_ASMFUNC_NAME
,
912 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
915 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
917 static struct current_it now_it
;
921 now_it_compatible (int cond
)
923 return (cond
& ~1) == (now_it
.cc
& ~1);
927 conditional_insn (void)
929 return inst
.cond
!= COND_ALWAYS
;
932 static int in_it_block (void);
934 static int handle_it_state (void);
936 static void force_automatic_it_block_close (void);
938 static void it_fsm_post_encode (void);
940 #define set_it_insn_type(type) \
943 inst.it_insn_type = type; \
944 if (handle_it_state () == FAIL) \
949 #define set_it_insn_type_nonvoid(type, failret) \
952 inst.it_insn_type = type; \
953 if (handle_it_state () == FAIL) \
958 #define set_it_insn_type_last() \
961 if (inst.cond == COND_ALWAYS) \
962 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
964 set_it_insn_type (INSIDE_IT_LAST_INSN); \
970 /* This array holds the chars that always start a comment. If the
971 pre-processor is disabled, these aren't very useful. */
972 char arm_comment_chars
[] = "@";
974 /* This array holds the chars that only start a comment at the beginning of
975 a line. If the line seems to have the form '# 123 filename'
976 .line and .file directives will appear in the pre-processed output. */
977 /* Note that input_file.c hand checks for '#' at the beginning of the
978 first line of the input file. This is because the compiler outputs
979 #NO_APP at the beginning of its output. */
980 /* Also note that comments like this one will always work. */
981 const char line_comment_chars
[] = "#";
983 char arm_line_separator_chars
[] = ";";
985 /* Chars that can be used to separate mant
986 from exp in floating point numbers. */
987 const char EXP_CHARS
[] = "eE";
989 /* Chars that mean this number is a floating point constant. */
993 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
995 /* Prefix characters that indicate the start of an immediate
997 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
999 /* Separator character handling. */
1001 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1004 skip_past_char (char ** str
, char c
)
1006 /* PR gas/14987: Allow for whitespace before the expected character. */
1007 skip_whitespace (*str
);
1018 #define skip_past_comma(str) skip_past_char (str, ',')
1020 /* Arithmetic expressions (possibly involving symbols). */
1022 /* Return TRUE if anything in the expression is a bignum. */
1025 walk_no_bignums (symbolS
* sp
)
1027 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
1030 if (symbol_get_value_expression (sp
)->X_add_symbol
)
1032 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
1033 || (symbol_get_value_expression (sp
)->X_op_symbol
1034 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
1040 static bfd_boolean in_my_get_expression
= FALSE
;
1042 /* Third argument to my_get_expression. */
1043 #define GE_NO_PREFIX 0
1044 #define GE_IMM_PREFIX 1
1045 #define GE_OPT_PREFIX 2
1046 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1047 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1048 #define GE_OPT_PREFIX_BIG 3
1051 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1055 /* In unified syntax, all prefixes are optional. */
1057 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1060 switch (prefix_mode
)
1062 case GE_NO_PREFIX
: break;
1064 if (!is_immediate_prefix (**str
))
1066 inst
.error
= _("immediate expression requires a # prefix");
1072 case GE_OPT_PREFIX_BIG
:
1073 if (is_immediate_prefix (**str
))
1080 memset (ep
, 0, sizeof (expressionS
));
1082 save_in
= input_line_pointer
;
1083 input_line_pointer
= *str
;
1084 in_my_get_expression
= TRUE
;
1086 in_my_get_expression
= FALSE
;
1088 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1090 /* We found a bad or missing expression in md_operand(). */
1091 *str
= input_line_pointer
;
1092 input_line_pointer
= save_in
;
1093 if (inst
.error
== NULL
)
1094 inst
.error
= (ep
->X_op
== O_absent
1095 ? _("missing expression") :_("bad expression"));
1099 /* Get rid of any bignums now, so that we don't generate an error for which
1100 we can't establish a line number later on. Big numbers are never valid
1101 in instructions, which is where this routine is always called. */
1102 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1103 && (ep
->X_op
== O_big
1104 || (ep
->X_add_symbol
1105 && (walk_no_bignums (ep
->X_add_symbol
)
1107 && walk_no_bignums (ep
->X_op_symbol
))))))
1109 inst
.error
= _("invalid constant");
1110 *str
= input_line_pointer
;
1111 input_line_pointer
= save_in
;
1115 *str
= input_line_pointer
;
1116 input_line_pointer
= save_in
;
1120 /* Turn a string in input_line_pointer into a floating point constant
1121 of type TYPE, and store the appropriate bytes in *LITP. The number
1122 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1123 returned, or NULL on OK.
1125 Note that fp constants aren't represent in the normal way on the ARM.
1126 In big endian mode, things are as expected. However, in little endian
1127 mode fp constants are big-endian word-wise, and little-endian byte-wise
1128 within the words. For example, (double) 1.1 in big endian mode is
1129 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1130 the byte sequence 99 99 f1 3f 9a 99 99 99.
1132 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1135 md_atof (int type
, char * litP
, int * sizeP
)
1138 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1170 return _("Unrecognized or unsupported floating point constant");
1173 t
= atof_ieee (input_line_pointer
, type
, words
);
1175 input_line_pointer
= t
;
1176 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1178 if (target_big_endian
)
1180 for (i
= 0; i
< prec
; i
++)
1182 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1183 litP
+= sizeof (LITTLENUM_TYPE
);
1188 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1189 for (i
= prec
- 1; i
>= 0; i
--)
1191 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1192 litP
+= sizeof (LITTLENUM_TYPE
);
1195 /* For a 4 byte float the order of elements in `words' is 1 0.
1196 For an 8 byte float the order is 1 0 3 2. */
1197 for (i
= 0; i
< prec
; i
+= 2)
1199 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1200 sizeof (LITTLENUM_TYPE
));
1201 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1202 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1203 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1210 /* We handle all bad expressions here, so that we can report the faulty
1211 instruction in the error message. */
1214 md_operand (expressionS
* exp
)
1216 if (in_my_get_expression
)
1217 exp
->X_op
= O_illegal
;
1220 /* Immediate values. */
1223 /* Generic immediate-value read function for use in directives.
1224 Accepts anything that 'expression' can fold to a constant.
1225 *val receives the number. */
1228 immediate_for_directive (int *val
)
1231 exp
.X_op
= O_illegal
;
1233 if (is_immediate_prefix (*input_line_pointer
))
1235 input_line_pointer
++;
1239 if (exp
.X_op
!= O_constant
)
1241 as_bad (_("expected #constant"));
1242 ignore_rest_of_line ();
1245 *val
= exp
.X_add_number
;
1250 /* Register parsing. */
1252 /* Generic register parser. CCP points to what should be the
1253 beginning of a register name. If it is indeed a valid register
1254 name, advance CCP over it and return the reg_entry structure;
1255 otherwise return NULL. Does not issue diagnostics. */
1257 static struct reg_entry
*
1258 arm_reg_parse_multi (char **ccp
)
1262 struct reg_entry
*reg
;
1264 skip_whitespace (start
);
1266 #ifdef REGISTER_PREFIX
1267 if (*start
!= REGISTER_PREFIX
)
1271 #ifdef OPTIONAL_REGISTER_PREFIX
1272 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1277 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1282 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1284 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1294 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1295 enum arm_reg_type type
)
1297 /* Alternative syntaxes are accepted for a few register classes. */
1304 /* Generic coprocessor register names are allowed for these. */
1305 if (reg
&& reg
->type
== REG_TYPE_CN
)
1310 /* For backward compatibility, a bare number is valid here. */
1312 unsigned long processor
= strtoul (start
, ccp
, 10);
1313 if (*ccp
!= start
&& processor
<= 15)
1318 case REG_TYPE_MMXWC
:
1319 /* WC includes WCG. ??? I'm not sure this is true for all
1320 instructions that take WC registers. */
1321 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1332 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1333 return value is the register number or FAIL. */
1336 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1339 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1342 /* Do not allow a scalar (reg+index) to parse as a register. */
1343 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1346 if (reg
&& reg
->type
== type
)
1349 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1356 /* Parse a Neon type specifier. *STR should point at the leading '.'
1357 character. Does no verification at this stage that the type fits the opcode
1364 Can all be legally parsed by this function.
1366 Fills in neon_type struct pointer with parsed information, and updates STR
1367 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1368 type, FAIL if not. */
1371 parse_neon_type (struct neon_type
*type
, char **str
)
1378 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1380 enum neon_el_type thistype
= NT_untyped
;
1381 unsigned thissize
= -1u;
1388 /* Just a size without an explicit type. */
1392 switch (TOLOWER (*ptr
))
1394 case 'i': thistype
= NT_integer
; break;
1395 case 'f': thistype
= NT_float
; break;
1396 case 'p': thistype
= NT_poly
; break;
1397 case 's': thistype
= NT_signed
; break;
1398 case 'u': thistype
= NT_unsigned
; break;
1400 thistype
= NT_float
;
1405 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1411 /* .f is an abbreviation for .f32. */
1412 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1417 thissize
= strtoul (ptr
, &ptr
, 10);
1419 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1422 as_bad (_("bad size %d in type specifier"), thissize
);
1430 type
->el
[type
->elems
].type
= thistype
;
1431 type
->el
[type
->elems
].size
= thissize
;
1436 /* Empty/missing type is not a successful parse. */
1437 if (type
->elems
== 0)
1445 /* Errors may be set multiple times during parsing or bit encoding
1446 (particularly in the Neon bits), but usually the earliest error which is set
1447 will be the most meaningful. Avoid overwriting it with later (cascading)
1448 errors by calling this function. */
1451 first_error (const char *err
)
1457 /* Parse a single type, e.g. ".s32", leading period included. */
1459 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1462 struct neon_type optype
;
1466 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1468 if (optype
.elems
== 1)
1469 *vectype
= optype
.el
[0];
1472 first_error (_("only one type should be specified for operand"));
1478 first_error (_("vector type expected"));
1490 /* Special meanings for indices (which have a range of 0-7), which will fit into
1493 #define NEON_ALL_LANES 15
1494 #define NEON_INTERLEAVE_LANES 14
1496 /* Parse either a register or a scalar, with an optional type. Return the
1497 register number, and optionally fill in the actual type of the register
1498 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1499 type/index information in *TYPEINFO. */
1502 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1503 enum arm_reg_type
*rtype
,
1504 struct neon_typed_alias
*typeinfo
)
1507 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1508 struct neon_typed_alias atype
;
1509 struct neon_type_el parsetype
;
1513 atype
.eltype
.type
= NT_invtype
;
1514 atype
.eltype
.size
= -1;
1516 /* Try alternate syntax for some types of register. Note these are mutually
1517 exclusive with the Neon syntax extensions. */
1520 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1528 /* Undo polymorphism when a set of register types may be accepted. */
1529 if ((type
== REG_TYPE_NDQ
1530 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1531 || (type
== REG_TYPE_VFSD
1532 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1533 || (type
== REG_TYPE_NSDQ
1534 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1535 || reg
->type
== REG_TYPE_NQ
))
1536 || (type
== REG_TYPE_NSD
1537 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1538 || (type
== REG_TYPE_MMXWC
1539 && (reg
->type
== REG_TYPE_MMXWCG
)))
1540 type
= (enum arm_reg_type
) reg
->type
;
1542 if (type
!= reg
->type
)
1548 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1550 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1552 first_error (_("can't redefine type for operand"));
1555 atype
.defined
|= NTA_HASTYPE
;
1556 atype
.eltype
= parsetype
;
1559 if (skip_past_char (&str
, '[') == SUCCESS
)
1561 if (type
!= REG_TYPE_VFD
1562 && !(type
== REG_TYPE_VFS
1563 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_2
)))
1565 first_error (_("only D registers may be indexed"));
1569 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1571 first_error (_("can't change index for operand"));
1575 atype
.defined
|= NTA_HASINDEX
;
1577 if (skip_past_char (&str
, ']') == SUCCESS
)
1578 atype
.index
= NEON_ALL_LANES
;
1583 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1585 if (exp
.X_op
!= O_constant
)
1587 first_error (_("constant expression required"));
1591 if (skip_past_char (&str
, ']') == FAIL
)
1594 atype
.index
= exp
.X_add_number
;
1609 /* Like arm_reg_parse, but also allow the following extra features:
1610 - If RTYPE is non-zero, return the (possibly restricted) type of the
1611 register (e.g. Neon double or quad reg when either has been requested).
1612 - If this is a Neon vector type with additional type information, fill
1613 in the struct pointed to by VECTYPE (if non-NULL).
1614 This function will fault on encountering a scalar. */
1617 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1618 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1620 struct neon_typed_alias atype
;
1622 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1627 /* Do not allow regname(... to parse as a register. */
1631 /* Do not allow a scalar (reg+index) to parse as a register. */
1632 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1634 first_error (_("register operand expected, but got scalar"));
1639 *vectype
= atype
.eltype
;
1646 #define NEON_SCALAR_REG(X) ((X) >> 4)
1647 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1649 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1650 have enough information to be able to do a good job bounds-checking. So, we
1651 just do easy checks here, and do further checks later. */
1654 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1658 struct neon_typed_alias atype
;
1659 enum arm_reg_type reg_type
= REG_TYPE_VFD
;
1662 reg_type
= REG_TYPE_VFS
;
1664 reg
= parse_typed_reg_or_scalar (&str
, reg_type
, NULL
, &atype
);
1666 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1669 if (atype
.index
== NEON_ALL_LANES
)
1671 first_error (_("scalar must have an index"));
1674 else if (atype
.index
>= 64 / elsize
)
1676 first_error (_("scalar index out of range"));
1681 *type
= atype
.eltype
;
1685 return reg
* 16 + atype
.index
;
1688 /* Types of registers in a list. */
1701 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1704 parse_reg_list (char ** strp
, enum reg_list_els etype
)
1710 gas_assert (etype
== REGLIST_RN
|| etype
== REGLIST_CLRM
);
1712 /* We come back here if we get ranges concatenated by '+' or '|'. */
1715 skip_whitespace (str
);
1728 const char apsr_str
[] = "apsr";
1729 int apsr_str_len
= strlen (apsr_str
);
1731 reg
= arm_reg_parse (&str
, REGLIST_RN
);
1732 if (etype
== REGLIST_CLRM
)
1734 if (reg
== REG_SP
|| reg
== REG_PC
)
1736 else if (reg
== FAIL
1737 && !strncasecmp (str
, apsr_str
, apsr_str_len
)
1738 && !ISALPHA (*(str
+ apsr_str_len
)))
1741 str
+= apsr_str_len
;
1746 first_error (_("r0-r12, lr or APSR expected"));
1750 else /* etype == REGLIST_RN. */
1754 first_error (_(reg_expected_msgs
[REGLIST_RN
]));
1765 first_error (_("bad range in register list"));
1769 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1771 if (range
& (1 << i
))
1773 (_("Warning: duplicated register (r%d) in register list"),
1781 if (range
& (1 << reg
))
1782 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1784 else if (reg
<= cur_reg
)
1785 as_tsktsk (_("Warning: register range not in ascending order"));
1790 while (skip_past_comma (&str
) != FAIL
1791 || (in_range
= 1, *str
++ == '-'));
1794 if (skip_past_char (&str
, '}') == FAIL
)
1796 first_error (_("missing `}'"));
1800 else if (etype
== REGLIST_RN
)
1804 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1807 if (exp
.X_op
== O_constant
)
1809 if (exp
.X_add_number
1810 != (exp
.X_add_number
& 0x0000ffff))
1812 inst
.error
= _("invalid register mask");
1816 if ((range
& exp
.X_add_number
) != 0)
1818 int regno
= range
& exp
.X_add_number
;
1821 regno
= (1 << regno
) - 1;
1823 (_("Warning: duplicated register (r%d) in register list"),
1827 range
|= exp
.X_add_number
;
1831 if (inst
.relocs
[0].type
!= 0)
1833 inst
.error
= _("expression too complex");
1837 memcpy (&inst
.relocs
[0].exp
, &exp
, sizeof (expressionS
));
1838 inst
.relocs
[0].type
= BFD_RELOC_ARM_MULTI
;
1839 inst
.relocs
[0].pc_rel
= 0;
1843 if (*str
== '|' || *str
== '+')
1849 while (another_range
);
1855 /* Parse a VFP register list. If the string is invalid return FAIL.
1856 Otherwise return the number of registers, and set PBASE to the first
1857 register. Parses registers of type ETYPE.
1858 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1859 - Q registers can be used to specify pairs of D registers
1860 - { } can be omitted from around a singleton register list
1861 FIXME: This is not implemented, as it would require backtracking in
1864 This could be done (the meaning isn't really ambiguous), but doesn't
1865 fit in well with the current parsing framework.
1866 - 32 D registers may be used (also true for VFPv3).
1867 FIXME: Types are ignored in these register lists, which is probably a
1871 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
,
1872 bfd_boolean
*partial_match
)
1877 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1881 unsigned long mask
= 0;
1883 bfd_boolean vpr_seen
= FALSE
;
1884 bfd_boolean expect_vpr
=
1885 (etype
== REGLIST_VFP_S_VPR
) || (etype
== REGLIST_VFP_D_VPR
);
1887 if (skip_past_char (&str
, '{') == FAIL
)
1889 inst
.error
= _("expecting {");
1896 case REGLIST_VFP_S_VPR
:
1897 regtype
= REG_TYPE_VFS
;
1902 case REGLIST_VFP_D_VPR
:
1903 regtype
= REG_TYPE_VFD
;
1906 case REGLIST_NEON_D
:
1907 regtype
= REG_TYPE_NDQ
;
1914 if (etype
!= REGLIST_VFP_S
&& etype
!= REGLIST_VFP_S_VPR
)
1916 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1917 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1921 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1924 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1931 base_reg
= max_regs
;
1932 *partial_match
= FALSE
;
1936 int setmask
= 1, addregs
= 1;
1937 const char vpr_str
[] = "vpr";
1938 int vpr_str_len
= strlen (vpr_str
);
1940 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1944 if (new_base
== FAIL
1945 && !strncasecmp (str
, vpr_str
, vpr_str_len
)
1946 && !ISALPHA (*(str
+ vpr_str_len
))
1952 base_reg
= 0; /* Canonicalize VPR only on d0 with 0 regs. */
1956 first_error (_("VPR expected last"));
1959 else if (new_base
== FAIL
)
1961 if (regtype
== REG_TYPE_VFS
)
1962 first_error (_("VFP single precision register or VPR "
1964 else /* regtype == REG_TYPE_VFD. */
1965 first_error (_("VFP/Neon double precision register or VPR "
1970 else if (new_base
== FAIL
)
1972 first_error (_(reg_expected_msgs
[regtype
]));
1976 *partial_match
= TRUE
;
1980 if (new_base
>= max_regs
)
1982 first_error (_("register out of range in list"));
1986 /* Note: a value of 2 * n is returned for the register Q<n>. */
1987 if (regtype
== REG_TYPE_NQ
)
1993 if (new_base
< base_reg
)
1994 base_reg
= new_base
;
1996 if (mask
& (setmask
<< new_base
))
1998 first_error (_("invalid register list"));
2002 if ((mask
>> new_base
) != 0 && ! warned
&& !vpr_seen
)
2004 as_tsktsk (_("register list not in ascending order"));
2008 mask
|= setmask
<< new_base
;
2011 if (*str
== '-') /* We have the start of a range expression */
2017 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
2020 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
2024 if (high_range
>= max_regs
)
2026 first_error (_("register out of range in list"));
2030 if (regtype
== REG_TYPE_NQ
)
2031 high_range
= high_range
+ 1;
2033 if (high_range
<= new_base
)
2035 inst
.error
= _("register range not in ascending order");
2039 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
2041 if (mask
& (setmask
<< new_base
))
2043 inst
.error
= _("invalid register list");
2047 mask
|= setmask
<< new_base
;
2052 while (skip_past_comma (&str
) != FAIL
);
2056 /* Sanity check -- should have raised a parse error above. */
2057 if ((!vpr_seen
&& count
== 0) || count
> max_regs
)
2062 if (expect_vpr
&& !vpr_seen
)
2064 first_error (_("VPR expected last"));
2068 /* Final test -- the registers must be consecutive. */
2070 for (i
= 0; i
< count
; i
++)
2072 if ((mask
& (1u << i
)) == 0)
2074 inst
.error
= _("non-contiguous register range");
2084 /* True if two alias types are the same. */
2087 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
2095 if (a
->defined
!= b
->defined
)
2098 if ((a
->defined
& NTA_HASTYPE
) != 0
2099 && (a
->eltype
.type
!= b
->eltype
.type
2100 || a
->eltype
.size
!= b
->eltype
.size
))
2103 if ((a
->defined
& NTA_HASINDEX
) != 0
2104 && (a
->index
!= b
->index
))
2110 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2111 The base register is put in *PBASE.
2112 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2114 The register stride (minus one) is put in bit 4 of the return value.
2115 Bits [6:5] encode the list length (minus one).
2116 The type of the list elements is put in *ELTYPE, if non-NULL. */
2118 #define NEON_LANE(X) ((X) & 0xf)
2119 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2120 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2123 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2124 struct neon_type_el
*eltype
)
2131 int leading_brace
= 0;
2132 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2133 const char *const incr_error
= _("register stride must be 1 or 2");
2134 const char *const type_error
= _("mismatched element/structure types in list");
2135 struct neon_typed_alias firsttype
;
2136 firsttype
.defined
= 0;
2137 firsttype
.eltype
.type
= NT_invtype
;
2138 firsttype
.eltype
.size
= -1;
2139 firsttype
.index
= -1;
2141 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2146 struct neon_typed_alias atype
;
2147 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2151 first_error (_(reg_expected_msgs
[rtype
]));
2158 if (rtype
== REG_TYPE_NQ
)
2164 else if (reg_incr
== -1)
2166 reg_incr
= getreg
- base_reg
;
2167 if (reg_incr
< 1 || reg_incr
> 2)
2169 first_error (_(incr_error
));
2173 else if (getreg
!= base_reg
+ reg_incr
* count
)
2175 first_error (_(incr_error
));
2179 if (! neon_alias_types_same (&atype
, &firsttype
))
2181 first_error (_(type_error
));
2185 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2189 struct neon_typed_alias htype
;
2190 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2192 lane
= NEON_INTERLEAVE_LANES
;
2193 else if (lane
!= NEON_INTERLEAVE_LANES
)
2195 first_error (_(type_error
));
2200 else if (reg_incr
!= 1)
2202 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2206 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2209 first_error (_(reg_expected_msgs
[rtype
]));
2212 if (! neon_alias_types_same (&htype
, &firsttype
))
2214 first_error (_(type_error
));
2217 count
+= hireg
+ dregs
- getreg
;
2221 /* If we're using Q registers, we can't use [] or [n] syntax. */
2222 if (rtype
== REG_TYPE_NQ
)
2228 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2232 else if (lane
!= atype
.index
)
2234 first_error (_(type_error
));
2238 else if (lane
== -1)
2239 lane
= NEON_INTERLEAVE_LANES
;
2240 else if (lane
!= NEON_INTERLEAVE_LANES
)
2242 first_error (_(type_error
));
2247 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2249 /* No lane set by [x]. We must be interleaving structures. */
2251 lane
= NEON_INTERLEAVE_LANES
;
2254 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2255 || (count
> 1 && reg_incr
== -1))
2257 first_error (_("error parsing element/structure list"));
2261 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2263 first_error (_("expected }"));
2271 *eltype
= firsttype
.eltype
;
2276 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2279 /* Parse an explicit relocation suffix on an expression. This is
2280 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2281 arm_reloc_hsh contains no entries, so this function can only
2282 succeed if there is no () after the word. Returns -1 on error,
2283 BFD_RELOC_UNUSED if there wasn't any suffix. */
2286 parse_reloc (char **str
)
2288 struct reloc_entry
*r
;
2292 return BFD_RELOC_UNUSED
;
2297 while (*q
&& *q
!= ')' && *q
!= ',')
2302 if ((r
= (struct reloc_entry
*)
2303 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2310 /* Directives: register aliases. */
2312 static struct reg_entry
*
2313 insert_reg_alias (char *str
, unsigned number
, int type
)
2315 struct reg_entry
*new_reg
;
2318 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2320 if (new_reg
->builtin
)
2321 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2323 /* Only warn about a redefinition if it's not defined as the
2325 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2326 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2331 name
= xstrdup (str
);
2332 new_reg
= XNEW (struct reg_entry
);
2334 new_reg
->name
= name
;
2335 new_reg
->number
= number
;
2336 new_reg
->type
= type
;
2337 new_reg
->builtin
= FALSE
;
2338 new_reg
->neon
= NULL
;
2340 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2347 insert_neon_reg_alias (char *str
, int number
, int type
,
2348 struct neon_typed_alias
*atype
)
2350 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2354 first_error (_("attempt to redefine typed alias"));
2360 reg
->neon
= XNEW (struct neon_typed_alias
);
2361 *reg
->neon
= *atype
;
2365 /* Look for the .req directive. This is of the form:
2367 new_register_name .req existing_register_name
2369 If we find one, or if it looks sufficiently like one that we want to
2370 handle any error here, return TRUE. Otherwise return FALSE. */
2373 create_register_alias (char * newname
, char *p
)
2375 struct reg_entry
*old
;
2376 char *oldname
, *nbuf
;
2379 /* The input scrubber ensures that whitespace after the mnemonic is
2380 collapsed to single spaces. */
2382 if (strncmp (oldname
, " .req ", 6) != 0)
2386 if (*oldname
== '\0')
2389 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2392 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2396 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2397 the desired alias name, and p points to its end. If not, then
2398 the desired alias name is in the global original_case_string. */
2399 #ifdef TC_CASE_SENSITIVE
2402 newname
= original_case_string
;
2403 nlen
= strlen (newname
);
2406 nbuf
= xmemdup0 (newname
, nlen
);
2408 /* Create aliases under the new name as stated; an all-lowercase
2409 version of the new name; and an all-uppercase version of the new
2411 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2413 for (p
= nbuf
; *p
; p
++)
2416 if (strncmp (nbuf
, newname
, nlen
))
2418 /* If this attempt to create an additional alias fails, do not bother
2419 trying to create the all-lower case alias. We will fail and issue
2420 a second, duplicate error message. This situation arises when the
2421 programmer does something like:
2424 The second .req creates the "Foo" alias but then fails to create
2425 the artificial FOO alias because it has already been created by the
2427 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2434 for (p
= nbuf
; *p
; p
++)
2437 if (strncmp (nbuf
, newname
, nlen
))
2438 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2445 /* Create a Neon typed/indexed register alias using directives, e.g.:
2450 These typed registers can be used instead of the types specified after the
2451 Neon mnemonic, so long as all operands given have types. Types can also be
2452 specified directly, e.g.:
2453 vadd d0.s32, d1.s32, d2.s32 */
2456 create_neon_reg_alias (char *newname
, char *p
)
2458 enum arm_reg_type basetype
;
2459 struct reg_entry
*basereg
;
2460 struct reg_entry mybasereg
;
2461 struct neon_type ntype
;
2462 struct neon_typed_alias typeinfo
;
2463 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2466 typeinfo
.defined
= 0;
2467 typeinfo
.eltype
.type
= NT_invtype
;
2468 typeinfo
.eltype
.size
= -1;
2469 typeinfo
.index
= -1;
2473 if (strncmp (p
, " .dn ", 5) == 0)
2474 basetype
= REG_TYPE_VFD
;
2475 else if (strncmp (p
, " .qn ", 5) == 0)
2476 basetype
= REG_TYPE_NQ
;
2485 basereg
= arm_reg_parse_multi (&p
);
2487 if (basereg
&& basereg
->type
!= basetype
)
2489 as_bad (_("bad type for register"));
2493 if (basereg
== NULL
)
2496 /* Try parsing as an integer. */
2497 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2498 if (exp
.X_op
!= O_constant
)
2500 as_bad (_("expression must be constant"));
2503 basereg
= &mybasereg
;
2504 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2510 typeinfo
= *basereg
->neon
;
2512 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2514 /* We got a type. */
2515 if (typeinfo
.defined
& NTA_HASTYPE
)
2517 as_bad (_("can't redefine the type of a register alias"));
2521 typeinfo
.defined
|= NTA_HASTYPE
;
2522 if (ntype
.elems
!= 1)
2524 as_bad (_("you must specify a single type only"));
2527 typeinfo
.eltype
= ntype
.el
[0];
2530 if (skip_past_char (&p
, '[') == SUCCESS
)
2533 /* We got a scalar index. */
2535 if (typeinfo
.defined
& NTA_HASINDEX
)
2537 as_bad (_("can't redefine the index of a scalar alias"));
2541 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2543 if (exp
.X_op
!= O_constant
)
2545 as_bad (_("scalar index must be constant"));
2549 typeinfo
.defined
|= NTA_HASINDEX
;
2550 typeinfo
.index
= exp
.X_add_number
;
2552 if (skip_past_char (&p
, ']') == FAIL
)
2554 as_bad (_("expecting ]"));
2559 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2560 the desired alias name, and p points to its end. If not, then
2561 the desired alias name is in the global original_case_string. */
2562 #ifdef TC_CASE_SENSITIVE
2563 namelen
= nameend
- newname
;
2565 newname
= original_case_string
;
2566 namelen
= strlen (newname
);
2569 namebuf
= xmemdup0 (newname
, namelen
);
2571 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2572 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2574 /* Insert name in all uppercase. */
2575 for (p
= namebuf
; *p
; p
++)
2578 if (strncmp (namebuf
, newname
, namelen
))
2579 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2580 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2582 /* Insert name in all lowercase. */
2583 for (p
= namebuf
; *p
; p
++)
2586 if (strncmp (namebuf
, newname
, namelen
))
2587 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2588 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2594 /* Should never be called, as .req goes between the alias and the
2595 register name, not at the beginning of the line. */
2598 s_req (int a ATTRIBUTE_UNUSED
)
2600 as_bad (_("invalid syntax for .req directive"));
2604 s_dn (int a ATTRIBUTE_UNUSED
)
2606 as_bad (_("invalid syntax for .dn directive"));
2610 s_qn (int a ATTRIBUTE_UNUSED
)
2612 as_bad (_("invalid syntax for .qn directive"));
2615 /* The .unreq directive deletes an alias which was previously defined
2616 by .req. For example:
2622 s_unreq (int a ATTRIBUTE_UNUSED
)
2627 name
= input_line_pointer
;
2629 while (*input_line_pointer
!= 0
2630 && *input_line_pointer
!= ' '
2631 && *input_line_pointer
!= '\n')
2632 ++input_line_pointer
;
2634 saved_char
= *input_line_pointer
;
2635 *input_line_pointer
= 0;
2638 as_bad (_("invalid syntax for .unreq directive"));
2641 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2645 as_bad (_("unknown register alias '%s'"), name
);
2646 else if (reg
->builtin
)
2647 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2654 hash_delete (arm_reg_hsh
, name
, FALSE
);
2655 free ((char *) reg
->name
);
2660 /* Also locate the all upper case and all lower case versions.
2661 Do not complain if we cannot find one or the other as it
2662 was probably deleted above. */
2664 nbuf
= strdup (name
);
2665 for (p
= nbuf
; *p
; p
++)
2667 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2670 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2671 free ((char *) reg
->name
);
2677 for (p
= nbuf
; *p
; p
++)
2679 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2682 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2683 free ((char *) reg
->name
);
2693 *input_line_pointer
= saved_char
;
2694 demand_empty_rest_of_line ();
2697 /* Directives: Instruction set selection. */
2700 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2701 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2702 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2703 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2705 /* Create a new mapping symbol for the transition to STATE. */
2708 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2711 const char * symname
;
2718 type
= BSF_NO_FLAGS
;
2722 type
= BSF_NO_FLAGS
;
2726 type
= BSF_NO_FLAGS
;
2732 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2733 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2738 THUMB_SET_FUNC (symbolP
, 0);
2739 ARM_SET_THUMB (symbolP
, 0);
2740 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2744 THUMB_SET_FUNC (symbolP
, 1);
2745 ARM_SET_THUMB (symbolP
, 1);
2746 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2754 /* Save the mapping symbols for future reference. Also check that
2755 we do not place two mapping symbols at the same offset within a
2756 frag. We'll handle overlap between frags in
2757 check_mapping_symbols.
2759 If .fill or other data filling directive generates zero sized data,
2760 the mapping symbol for the following code will have the same value
2761 as the one generated for the data filling directive. In this case,
2762 we replace the old symbol with the new one at the same address. */
2765 if (frag
->tc_frag_data
.first_map
!= NULL
)
2767 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2768 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2770 frag
->tc_frag_data
.first_map
= symbolP
;
2772 if (frag
->tc_frag_data
.last_map
!= NULL
)
2774 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2775 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2776 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2778 frag
->tc_frag_data
.last_map
= symbolP
;
2781 /* We must sometimes convert a region marked as code to data during
2782 code alignment, if an odd number of bytes have to be padded. The
2783 code mapping symbol is pushed to an aligned address. */
2786 insert_data_mapping_symbol (enum mstate state
,
2787 valueT value
, fragS
*frag
, offsetT bytes
)
2789 /* If there was already a mapping symbol, remove it. */
2790 if (frag
->tc_frag_data
.last_map
!= NULL
2791 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2793 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2797 know (frag
->tc_frag_data
.first_map
== symp
);
2798 frag
->tc_frag_data
.first_map
= NULL
;
2800 frag
->tc_frag_data
.last_map
= NULL
;
2801 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2804 make_mapping_symbol (MAP_DATA
, value
, frag
);
2805 make_mapping_symbol (state
, value
+ bytes
, frag
);
2808 static void mapping_state_2 (enum mstate state
, int max_chars
);
2810 /* Set the mapping state to STATE. Only call this when about to
2811 emit some STATE bytes to the file. */
2813 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2815 mapping_state (enum mstate state
)
2817 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2819 if (mapstate
== state
)
2820 /* The mapping symbol has already been emitted.
2821 There is nothing else to do. */
2824 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2826 All ARM instructions require 4-byte alignment.
2827 (Almost) all Thumb instructions require 2-byte alignment.
2829 When emitting instructions into any section, mark the section
2832 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2833 but themselves require 2-byte alignment; this applies to some
2834 PC- relative forms. However, these cases will involve implicit
2835 literal pool generation or an explicit .align >=2, both of
2836 which will cause the section to me marked with sufficient
2837 alignment. Thus, we don't handle those cases here. */
2838 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2840 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2841 /* This case will be evaluated later. */
2844 mapping_state_2 (state
, 0);
2847 /* Same as mapping_state, but MAX_CHARS bytes have already been
2848 allocated. Put the mapping symbol that far back. */
2851 mapping_state_2 (enum mstate state
, int max_chars
)
2853 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2855 if (!SEG_NORMAL (now_seg
))
2858 if (mapstate
== state
)
2859 /* The mapping symbol has already been emitted.
2860 There is nothing else to do. */
2863 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2864 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2866 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2867 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2870 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2873 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2874 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2878 #define mapping_state(x) ((void)0)
2879 #define mapping_state_2(x, y) ((void)0)
2882 /* Find the real, Thumb encoded start of a Thumb function. */
2886 find_real_start (symbolS
* symbolP
)
2889 const char * name
= S_GET_NAME (symbolP
);
2890 symbolS
* new_target
;
2892 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2893 #define STUB_NAME ".real_start_of"
2898 /* The compiler may generate BL instructions to local labels because
2899 it needs to perform a branch to a far away location. These labels
2900 do not have a corresponding ".real_start_of" label. We check
2901 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2902 the ".real_start_of" convention for nonlocal branches. */
2903 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2906 real_start
= concat (STUB_NAME
, name
, NULL
);
2907 new_target
= symbol_find (real_start
);
2910 if (new_target
== NULL
)
2912 as_warn (_("Failed to find real start of function: %s\n"), name
);
2913 new_target
= symbolP
;
2921 opcode_select (int width
)
2928 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2929 as_bad (_("selected processor does not support THUMB opcodes"));
2932 /* No need to force the alignment, since we will have been
2933 coming from ARM mode, which is word-aligned. */
2934 record_alignment (now_seg
, 1);
2941 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2942 as_bad (_("selected processor does not support ARM opcodes"));
2947 frag_align (2, 0, 0);
2949 record_alignment (now_seg
, 1);
2954 as_bad (_("invalid instruction size selected (%d)"), width
);
2959 s_arm (int ignore ATTRIBUTE_UNUSED
)
2962 demand_empty_rest_of_line ();
2966 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2969 demand_empty_rest_of_line ();
2973 s_code (int unused ATTRIBUTE_UNUSED
)
2977 temp
= get_absolute_expression ();
2982 opcode_select (temp
);
2986 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2991 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2993 /* If we are not already in thumb mode go into it, EVEN if
2994 the target processor does not support thumb instructions.
2995 This is used by gcc/config/arm/lib1funcs.asm for example
2996 to compile interworking support functions even if the
2997 target processor should not support interworking. */
3001 record_alignment (now_seg
, 1);
3004 demand_empty_rest_of_line ();
3008 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
3012 /* The following label is the name/address of the start of a Thumb function.
3013 We need to know this for the interworking support. */
3014 label_is_thumb_function_name
= TRUE
;
3017 /* Perform a .set directive, but also mark the alias as
3018 being a thumb function. */
3021 s_thumb_set (int equiv
)
3023 /* XXX the following is a duplicate of the code for s_set() in read.c
3024 We cannot just call that code as we need to get at the symbol that
3031 /* Especial apologies for the random logic:
3032 This just grew, and could be parsed much more simply!
3034 delim
= get_symbol_name (& name
);
3035 end_name
= input_line_pointer
;
3036 (void) restore_line_pointer (delim
);
3038 if (*input_line_pointer
!= ',')
3041 as_bad (_("expected comma after name \"%s\""), name
);
3043 ignore_rest_of_line ();
3047 input_line_pointer
++;
3050 if (name
[0] == '.' && name
[1] == '\0')
3052 /* XXX - this should not happen to .thumb_set. */
3056 if ((symbolP
= symbol_find (name
)) == NULL
3057 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
3060 /* When doing symbol listings, play games with dummy fragments living
3061 outside the normal fragment chain to record the file and line info
3063 if (listing
& LISTING_SYMBOLS
)
3065 extern struct list_info_struct
* listing_tail
;
3066 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
3068 memset (dummy_frag
, 0, sizeof (fragS
));
3069 dummy_frag
->fr_type
= rs_fill
;
3070 dummy_frag
->line
= listing_tail
;
3071 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
3072 dummy_frag
->fr_symbol
= symbolP
;
3076 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
3079 /* "set" symbols are local unless otherwise specified. */
3080 SF_SET_LOCAL (symbolP
);
3081 #endif /* OBJ_COFF */
3082 } /* Make a new symbol. */
3084 symbol_table_insert (symbolP
);
3089 && S_IS_DEFINED (symbolP
)
3090 && S_GET_SEGMENT (symbolP
) != reg_section
)
3091 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
3093 pseudo_set (symbolP
);
3095 demand_empty_rest_of_line ();
3097 /* XXX Now we come to the Thumb specific bit of code. */
3099 THUMB_SET_FUNC (symbolP
, 1);
3100 ARM_SET_THUMB (symbolP
, 1);
3101 #if defined OBJ_ELF || defined OBJ_COFF
3102 ARM_SET_INTERWORK (symbolP
, support_interwork
);
3106 /* Directives: Mode selection. */
3108 /* .syntax [unified|divided] - choose the new unified syntax
3109 (same for Arm and Thumb encoding, modulo slight differences in what
3110 can be represented) or the old divergent syntax for each mode. */
3112 s_syntax (int unused ATTRIBUTE_UNUSED
)
3116 delim
= get_symbol_name (& name
);
3118 if (!strcasecmp (name
, "unified"))
3119 unified_syntax
= TRUE
;
3120 else if (!strcasecmp (name
, "divided"))
3121 unified_syntax
= FALSE
;
3124 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3127 (void) restore_line_pointer (delim
);
3128 demand_empty_rest_of_line ();
3131 /* Directives: sectioning and alignment. */
3134 s_bss (int ignore ATTRIBUTE_UNUSED
)
3136 /* We don't support putting frags in the BSS segment, we fake it by
3137 marking in_bss, then looking at s_skip for clues. */
3138 subseg_set (bss_section
, 0);
3139 demand_empty_rest_of_line ();
3141 #ifdef md_elf_section_change_hook
3142 md_elf_section_change_hook ();
3147 s_even (int ignore ATTRIBUTE_UNUSED
)
3149 /* Never make frag if expect extra pass. */
3151 frag_align (1, 0, 0);
3153 record_alignment (now_seg
, 1);
3155 demand_empty_rest_of_line ();
3158 /* Directives: CodeComposer Studio. */
3160 /* .ref (for CodeComposer Studio syntax only). */
3162 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3164 if (codecomposer_syntax
)
3165 ignore_rest_of_line ();
3167 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3170 /* If name is not NULL, then it is used for marking the beginning of a
3171 function, whereas if it is NULL then it means the function end. */
3173 asmfunc_debug (const char * name
)
3175 static const char * last_name
= NULL
;
3179 gas_assert (last_name
== NULL
);
3182 if (debug_type
== DEBUG_STABS
)
3183 stabs_generate_asm_func (name
, name
);
3187 gas_assert (last_name
!= NULL
);
3189 if (debug_type
== DEBUG_STABS
)
3190 stabs_generate_asm_endfunc (last_name
, last_name
);
3197 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3199 if (codecomposer_syntax
)
3201 switch (asmfunc_state
)
3203 case OUTSIDE_ASMFUNC
:
3204 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3207 case WAITING_ASMFUNC_NAME
:
3208 as_bad (_(".asmfunc repeated."));
3211 case WAITING_ENDASMFUNC
:
3212 as_bad (_(".asmfunc without function."));
3215 demand_empty_rest_of_line ();
3218 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3222 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3224 if (codecomposer_syntax
)
3226 switch (asmfunc_state
)
3228 case OUTSIDE_ASMFUNC
:
3229 as_bad (_(".endasmfunc without a .asmfunc."));
3232 case WAITING_ASMFUNC_NAME
:
3233 as_bad (_(".endasmfunc without function."));
3236 case WAITING_ENDASMFUNC
:
3237 asmfunc_state
= OUTSIDE_ASMFUNC
;
3238 asmfunc_debug (NULL
);
3241 demand_empty_rest_of_line ();
3244 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3248 s_ccs_def (int name
)
3250 if (codecomposer_syntax
)
3253 as_bad (_(".def pseudo-op only available with -mccs flag."));
3256 /* Directives: Literal pools. */
3258 static literal_pool
*
3259 find_literal_pool (void)
3261 literal_pool
* pool
;
3263 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3265 if (pool
->section
== now_seg
3266 && pool
->sub_section
== now_subseg
)
3273 static literal_pool
*
3274 find_or_make_literal_pool (void)
3276 /* Next literal pool ID number. */
3277 static unsigned int latest_pool_num
= 1;
3278 literal_pool
* pool
;
3280 pool
= find_literal_pool ();
3284 /* Create a new pool. */
3285 pool
= XNEW (literal_pool
);
3289 pool
->next_free_entry
= 0;
3290 pool
->section
= now_seg
;
3291 pool
->sub_section
= now_subseg
;
3292 pool
->next
= list_of_pools
;
3293 pool
->symbol
= NULL
;
3294 pool
->alignment
= 2;
3296 /* Add it to the list. */
3297 list_of_pools
= pool
;
3300 /* New pools, and emptied pools, will have a NULL symbol. */
3301 if (pool
->symbol
== NULL
)
3303 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3304 (valueT
) 0, &zero_address_frag
);
3305 pool
->id
= latest_pool_num
++;
3312 /* Add the literal in the global 'inst'
3313 structure to the relevant literal pool. */
3316 add_to_lit_pool (unsigned int nbytes
)
3318 #define PADDING_SLOT 0x1
3319 #define LIT_ENTRY_SIZE_MASK 0xFF
3320 literal_pool
* pool
;
3321 unsigned int entry
, pool_size
= 0;
3322 bfd_boolean padding_slot_p
= FALSE
;
3328 imm1
= inst
.operands
[1].imm
;
3329 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3330 : inst
.relocs
[0].exp
.X_unsigned
? 0
3331 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3332 if (target_big_endian
)
3335 imm2
= inst
.operands
[1].imm
;
3339 pool
= find_or_make_literal_pool ();
3341 /* Check if this literal value is already in the pool. */
3342 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3346 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3347 && (inst
.relocs
[0].exp
.X_op
== O_constant
)
3348 && (pool
->literals
[entry
].X_add_number
3349 == inst
.relocs
[0].exp
.X_add_number
)
3350 && (pool
->literals
[entry
].X_md
== nbytes
)
3351 && (pool
->literals
[entry
].X_unsigned
3352 == inst
.relocs
[0].exp
.X_unsigned
))
3355 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3356 && (inst
.relocs
[0].exp
.X_op
== O_symbol
)
3357 && (pool
->literals
[entry
].X_add_number
3358 == inst
.relocs
[0].exp
.X_add_number
)
3359 && (pool
->literals
[entry
].X_add_symbol
3360 == inst
.relocs
[0].exp
.X_add_symbol
)
3361 && (pool
->literals
[entry
].X_op_symbol
3362 == inst
.relocs
[0].exp
.X_op_symbol
)
3363 && (pool
->literals
[entry
].X_md
== nbytes
))
3366 else if ((nbytes
== 8)
3367 && !(pool_size
& 0x7)
3368 && ((entry
+ 1) != pool
->next_free_entry
)
3369 && (pool
->literals
[entry
].X_op
== O_constant
)
3370 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3371 && (pool
->literals
[entry
].X_unsigned
3372 == inst
.relocs
[0].exp
.X_unsigned
)
3373 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3374 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3375 && (pool
->literals
[entry
+ 1].X_unsigned
3376 == inst
.relocs
[0].exp
.X_unsigned
))
3379 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3380 if (padding_slot_p
&& (nbytes
== 4))
3386 /* Do we need to create a new entry? */
3387 if (entry
== pool
->next_free_entry
)
3389 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3391 inst
.error
= _("literal pool overflow");
3397 /* For 8-byte entries, we align to an 8-byte boundary,
3398 and split it into two 4-byte entries, because on 32-bit
3399 host, 8-byte constants are treated as big num, thus
3400 saved in "generic_bignum" which will be overwritten
3401 by later assignments.
3403 We also need to make sure there is enough space for
3406 We also check to make sure the literal operand is a
3408 if (!(inst
.relocs
[0].exp
.X_op
== O_constant
3409 || inst
.relocs
[0].exp
.X_op
== O_big
))
3411 inst
.error
= _("invalid type for literal pool");
3414 else if (pool_size
& 0x7)
3416 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3418 inst
.error
= _("literal pool overflow");
3422 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3423 pool
->literals
[entry
].X_op
= O_constant
;
3424 pool
->literals
[entry
].X_add_number
= 0;
3425 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3426 pool
->next_free_entry
+= 1;
3429 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3431 inst
.error
= _("literal pool overflow");
3435 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3436 pool
->literals
[entry
].X_op
= O_constant
;
3437 pool
->literals
[entry
].X_add_number
= imm1
;
3438 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3439 pool
->literals
[entry
++].X_md
= 4;
3440 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3441 pool
->literals
[entry
].X_op
= O_constant
;
3442 pool
->literals
[entry
].X_add_number
= imm2
;
3443 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3444 pool
->literals
[entry
].X_md
= 4;
3445 pool
->alignment
= 3;
3446 pool
->next_free_entry
+= 1;
3450 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3451 pool
->literals
[entry
].X_md
= 4;
3455 /* PR ld/12974: Record the location of the first source line to reference
3456 this entry in the literal pool. If it turns out during linking that the
3457 symbol does not exist we will be able to give an accurate line number for
3458 the (first use of the) missing reference. */
3459 if (debug_type
== DEBUG_DWARF2
)
3460 dwarf2_where (pool
->locs
+ entry
);
3462 pool
->next_free_entry
+= 1;
3464 else if (padding_slot_p
)
3466 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3467 pool
->literals
[entry
].X_md
= nbytes
;
3470 inst
.relocs
[0].exp
.X_op
= O_symbol
;
3471 inst
.relocs
[0].exp
.X_add_number
= pool_size
;
3472 inst
.relocs
[0].exp
.X_add_symbol
= pool
->symbol
;
3478 tc_start_label_without_colon (void)
3480 bfd_boolean ret
= TRUE
;
3482 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3484 const char *label
= input_line_pointer
;
3486 while (!is_end_of_line
[(int) label
[-1]])
3491 as_bad (_("Invalid label '%s'"), label
);
3495 asmfunc_debug (label
);
3497 asmfunc_state
= WAITING_ENDASMFUNC
;
3503 /* Can't use symbol_new here, so have to create a symbol and then at
3504 a later date assign it a value. That's what these functions do. */
3507 symbol_locate (symbolS
* symbolP
,
3508 const char * name
, /* It is copied, the caller can modify. */
3509 segT segment
, /* Segment identifier (SEG_<something>). */
3510 valueT valu
, /* Symbol value. */
3511 fragS
* frag
) /* Associated fragment. */
3514 char * preserved_copy_of_name
;
3516 name_length
= strlen (name
) + 1; /* +1 for \0. */
3517 obstack_grow (¬es
, name
, name_length
);
3518 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3520 #ifdef tc_canonicalize_symbol_name
3521 preserved_copy_of_name
=
3522 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3525 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3527 S_SET_SEGMENT (symbolP
, segment
);
3528 S_SET_VALUE (symbolP
, valu
);
3529 symbol_clear_list_pointers (symbolP
);
3531 symbol_set_frag (symbolP
, frag
);
3533 /* Link to end of symbol chain. */
3535 extern int symbol_table_frozen
;
3537 if (symbol_table_frozen
)
3541 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3543 obj_symbol_new_hook (symbolP
);
3545 #ifdef tc_symbol_new_hook
3546 tc_symbol_new_hook (symbolP
);
3550 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3551 #endif /* DEBUG_SYMS */
3555 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3558 literal_pool
* pool
;
3561 pool
= find_literal_pool ();
3563 || pool
->symbol
== NULL
3564 || pool
->next_free_entry
== 0)
3567 /* Align pool as you have word accesses.
3568 Only make a frag if we have to. */
3570 frag_align (pool
->alignment
, 0, 0);
3572 record_alignment (now_seg
, 2);
3575 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3576 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3578 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3580 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3581 (valueT
) frag_now_fix (), frag_now
);
3582 symbol_table_insert (pool
->symbol
);
3584 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3586 #if defined OBJ_COFF || defined OBJ_ELF
3587 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3590 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3593 if (debug_type
== DEBUG_DWARF2
)
3594 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3596 /* First output the expression in the instruction to the pool. */
3597 emit_expr (&(pool
->literals
[entry
]),
3598 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3601 /* Mark the pool as empty. */
3602 pool
->next_free_entry
= 0;
3603 pool
->symbol
= NULL
;
3607 /* Forward declarations for functions below, in the MD interface
3609 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3610 static valueT
create_unwind_entry (int);
3611 static void start_unwind_section (const segT
, int);
3612 static void add_unwind_opcode (valueT
, int);
3613 static void flush_pending_unwind (void);
3615 /* Directives: Data. */
3618 s_arm_elf_cons (int nbytes
)
3622 #ifdef md_flush_pending_output
3623 md_flush_pending_output ();
3626 if (is_it_end_of_statement ())
3628 demand_empty_rest_of_line ();
3632 #ifdef md_cons_align
3633 md_cons_align (nbytes
);
3636 mapping_state (MAP_DATA
);
3640 char *base
= input_line_pointer
;
3644 if (exp
.X_op
!= O_symbol
)
3645 emit_expr (&exp
, (unsigned int) nbytes
);
3648 char *before_reloc
= input_line_pointer
;
3649 reloc
= parse_reloc (&input_line_pointer
);
3652 as_bad (_("unrecognized relocation suffix"));
3653 ignore_rest_of_line ();
3656 else if (reloc
== BFD_RELOC_UNUSED
)
3657 emit_expr (&exp
, (unsigned int) nbytes
);
3660 reloc_howto_type
*howto
= (reloc_howto_type
*)
3661 bfd_reloc_type_lookup (stdoutput
,
3662 (bfd_reloc_code_real_type
) reloc
);
3663 int size
= bfd_get_reloc_size (howto
);
3665 if (reloc
== BFD_RELOC_ARM_PLT32
)
3667 as_bad (_("(plt) is only valid on branch targets"));
3668 reloc
= BFD_RELOC_UNUSED
;
3673 as_bad (ngettext ("%s relocations do not fit in %d byte",
3674 "%s relocations do not fit in %d bytes",
3676 howto
->name
, nbytes
);
3679 /* We've parsed an expression stopping at O_symbol.
3680 But there may be more expression left now that we
3681 have parsed the relocation marker. Parse it again.
3682 XXX Surely there is a cleaner way to do this. */
3683 char *p
= input_line_pointer
;
3685 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3687 memcpy (save_buf
, base
, input_line_pointer
- base
);
3688 memmove (base
+ (input_line_pointer
- before_reloc
),
3689 base
, before_reloc
- base
);
3691 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3693 memcpy (base
, save_buf
, p
- base
);
3695 offset
= nbytes
- size
;
3696 p
= frag_more (nbytes
);
3697 memset (p
, 0, nbytes
);
3698 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3699 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3705 while (*input_line_pointer
++ == ',');
3707 /* Put terminator back into stream. */
3708 input_line_pointer
--;
3709 demand_empty_rest_of_line ();
3712 /* Emit an expression containing a 32-bit thumb instruction.
3713 Implementation based on put_thumb32_insn. */
3716 emit_thumb32_expr (expressionS
* exp
)
3718 expressionS exp_high
= *exp
;
3720 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3721 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3722 exp
->X_add_number
&= 0xffff;
3723 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3726 /* Guess the instruction size based on the opcode. */
3729 thumb_insn_size (int opcode
)
3731 if ((unsigned int) opcode
< 0xe800u
)
3733 else if ((unsigned int) opcode
>= 0xe8000000u
)
3740 emit_insn (expressionS
*exp
, int nbytes
)
3744 if (exp
->X_op
== O_constant
)
3749 size
= thumb_insn_size (exp
->X_add_number
);
3753 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3755 as_bad (_(".inst.n operand too big. "\
3756 "Use .inst.w instead"));
3761 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3762 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3764 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3766 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3767 emit_thumb32_expr (exp
);
3769 emit_expr (exp
, (unsigned int) size
);
3771 it_fsm_post_encode ();
3775 as_bad (_("cannot determine Thumb instruction size. " \
3776 "Use .inst.n/.inst.w instead"));
3779 as_bad (_("constant expression required"));
3784 /* Like s_arm_elf_cons but do not use md_cons_align and
3785 set the mapping state to MAP_ARM/MAP_THUMB. */
3788 s_arm_elf_inst (int nbytes
)
3790 if (is_it_end_of_statement ())
3792 demand_empty_rest_of_line ();
3796 /* Calling mapping_state () here will not change ARM/THUMB,
3797 but will ensure not to be in DATA state. */
3800 mapping_state (MAP_THUMB
);
3805 as_bad (_("width suffixes are invalid in ARM mode"));
3806 ignore_rest_of_line ();
3812 mapping_state (MAP_ARM
);
3821 if (! emit_insn (& exp
, nbytes
))
3823 ignore_rest_of_line ();
3827 while (*input_line_pointer
++ == ',');
3829 /* Put terminator back into stream. */
3830 input_line_pointer
--;
3831 demand_empty_rest_of_line ();
3834 /* Parse a .rel31 directive. */
3837 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3844 if (*input_line_pointer
== '1')
3845 highbit
= 0x80000000;
3846 else if (*input_line_pointer
!= '0')
3847 as_bad (_("expected 0 or 1"));
3849 input_line_pointer
++;
3850 if (*input_line_pointer
!= ',')
3851 as_bad (_("missing comma"));
3852 input_line_pointer
++;
3854 #ifdef md_flush_pending_output
3855 md_flush_pending_output ();
3858 #ifdef md_cons_align
3862 mapping_state (MAP_DATA
);
3867 md_number_to_chars (p
, highbit
, 4);
3868 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3869 BFD_RELOC_ARM_PREL31
);
3871 demand_empty_rest_of_line ();
3874 /* Directives: AEABI stack-unwind tables. */
3876 /* Parse an unwind_fnstart directive. Simply records the current location. */
3879 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3881 demand_empty_rest_of_line ();
3882 if (unwind
.proc_start
)
3884 as_bad (_("duplicate .fnstart directive"));
3888 /* Mark the start of the function. */
3889 unwind
.proc_start
= expr_build_dot ();
3891 /* Reset the rest of the unwind info. */
3892 unwind
.opcode_count
= 0;
3893 unwind
.table_entry
= NULL
;
3894 unwind
.personality_routine
= NULL
;
3895 unwind
.personality_index
= -1;
3896 unwind
.frame_size
= 0;
3897 unwind
.fp_offset
= 0;
3898 unwind
.fp_reg
= REG_SP
;
3900 unwind
.sp_restored
= 0;
3904 /* Parse a handlerdata directive. Creates the exception handling table entry
3905 for the function. */
3908 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3910 demand_empty_rest_of_line ();
3911 if (!unwind
.proc_start
)
3912 as_bad (MISSING_FNSTART
);
3914 if (unwind
.table_entry
)
3915 as_bad (_("duplicate .handlerdata directive"));
3917 create_unwind_entry (1);
3920 /* Parse an unwind_fnend directive. Generates the index table entry. */
3923 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3928 unsigned int marked_pr_dependency
;
3930 demand_empty_rest_of_line ();
3932 if (!unwind
.proc_start
)
3934 as_bad (_(".fnend directive without .fnstart"));
3938 /* Add eh table entry. */
3939 if (unwind
.table_entry
== NULL
)
3940 val
= create_unwind_entry (0);
3944 /* Add index table entry. This is two words. */
3945 start_unwind_section (unwind
.saved_seg
, 1);
3946 frag_align (2, 0, 0);
3947 record_alignment (now_seg
, 2);
3949 ptr
= frag_more (8);
3951 where
= frag_now_fix () - 8;
3953 /* Self relative offset of the function start. */
3954 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3955 BFD_RELOC_ARM_PREL31
);
3957 /* Indicate dependency on EHABI-defined personality routines to the
3958 linker, if it hasn't been done already. */
3959 marked_pr_dependency
3960 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3961 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3962 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3964 static const char *const name
[] =
3966 "__aeabi_unwind_cpp_pr0",
3967 "__aeabi_unwind_cpp_pr1",
3968 "__aeabi_unwind_cpp_pr2"
3970 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3971 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3972 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3973 |= 1 << unwind
.personality_index
;
3977 /* Inline exception table entry. */
3978 md_number_to_chars (ptr
+ 4, val
, 4);
3980 /* Self relative offset of the table entry. */
3981 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3982 BFD_RELOC_ARM_PREL31
);
3984 /* Restore the original section. */
3985 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3987 unwind
.proc_start
= NULL
;
3991 /* Parse an unwind_cantunwind directive. */
3994 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3996 demand_empty_rest_of_line ();
3997 if (!unwind
.proc_start
)
3998 as_bad (MISSING_FNSTART
);
4000 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4001 as_bad (_("personality routine specified for cantunwind frame"));
4003 unwind
.personality_index
= -2;
4007 /* Parse a personalityindex directive. */
4010 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
4014 if (!unwind
.proc_start
)
4015 as_bad (MISSING_FNSTART
);
4017 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4018 as_bad (_("duplicate .personalityindex directive"));
4022 if (exp
.X_op
!= O_constant
4023 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
4025 as_bad (_("bad personality routine number"));
4026 ignore_rest_of_line ();
4030 unwind
.personality_index
= exp
.X_add_number
;
4032 demand_empty_rest_of_line ();
4036 /* Parse a personality directive. */
4039 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
4043 if (!unwind
.proc_start
)
4044 as_bad (MISSING_FNSTART
);
4046 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4047 as_bad (_("duplicate .personality directive"));
4049 c
= get_symbol_name (& name
);
4050 p
= input_line_pointer
;
4052 ++ input_line_pointer
;
4053 unwind
.personality_routine
= symbol_find_or_make (name
);
4055 demand_empty_rest_of_line ();
4059 /* Parse a directive saving core registers. */
4062 s_arm_unwind_save_core (void)
4068 range
= parse_reg_list (&input_line_pointer
, REGLIST_RN
);
4071 as_bad (_("expected register list"));
4072 ignore_rest_of_line ();
4076 demand_empty_rest_of_line ();
4078 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
4079 into .unwind_save {..., sp...}. We aren't bothered about the value of
4080 ip because it is clobbered by calls. */
4081 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
4082 && (range
& 0x3000) == 0x1000)
4084 unwind
.opcode_count
--;
4085 unwind
.sp_restored
= 0;
4086 range
= (range
| 0x2000) & ~0x1000;
4087 unwind
.pending_offset
= 0;
4093 /* See if we can use the short opcodes. These pop a block of up to 8
4094 registers starting with r4, plus maybe r14. */
4095 for (n
= 0; n
< 8; n
++)
4097 /* Break at the first non-saved register. */
4098 if ((range
& (1 << (n
+ 4))) == 0)
4101 /* See if there are any other bits set. */
4102 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
4104 /* Use the long form. */
4105 op
= 0x8000 | ((range
>> 4) & 0xfff);
4106 add_unwind_opcode (op
, 2);
4110 /* Use the short form. */
4112 op
= 0xa8; /* Pop r14. */
4114 op
= 0xa0; /* Do not pop r14. */
4116 add_unwind_opcode (op
, 1);
4123 op
= 0xb100 | (range
& 0xf);
4124 add_unwind_opcode (op
, 2);
4127 /* Record the number of bytes pushed. */
4128 for (n
= 0; n
< 16; n
++)
4130 if (range
& (1 << n
))
4131 unwind
.frame_size
+= 4;
4136 /* Parse a directive saving FPA registers. */
4139 s_arm_unwind_save_fpa (int reg
)
4145 /* Get Number of registers to transfer. */
4146 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4149 exp
.X_op
= O_illegal
;
4151 if (exp
.X_op
!= O_constant
)
4153 as_bad (_("expected , <constant>"));
4154 ignore_rest_of_line ();
4158 num_regs
= exp
.X_add_number
;
4160 if (num_regs
< 1 || num_regs
> 4)
4162 as_bad (_("number of registers must be in the range [1:4]"));
4163 ignore_rest_of_line ();
4167 demand_empty_rest_of_line ();
4172 op
= 0xb4 | (num_regs
- 1);
4173 add_unwind_opcode (op
, 1);
4178 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4179 add_unwind_opcode (op
, 2);
4181 unwind
.frame_size
+= num_regs
* 12;
4185 /* Parse a directive saving VFP registers for ARMv6 and above. */
4188 s_arm_unwind_save_vfp_armv6 (void)
4193 int num_vfpv3_regs
= 0;
4194 int num_regs_below_16
;
4195 bfd_boolean partial_match
;
4197 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
,
4201 as_bad (_("expected register list"));
4202 ignore_rest_of_line ();
4206 demand_empty_rest_of_line ();
4208 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4209 than FSTMX/FLDMX-style ones). */
4211 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4213 num_vfpv3_regs
= count
;
4214 else if (start
+ count
> 16)
4215 num_vfpv3_regs
= start
+ count
- 16;
4217 if (num_vfpv3_regs
> 0)
4219 int start_offset
= start
> 16 ? start
- 16 : 0;
4220 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4221 add_unwind_opcode (op
, 2);
4224 /* Generate opcode for registers numbered in the range 0 .. 15. */
4225 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4226 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4227 if (num_regs_below_16
> 0)
4229 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4230 add_unwind_opcode (op
, 2);
4233 unwind
.frame_size
+= count
* 8;
4237 /* Parse a directive saving VFP registers for pre-ARMv6. */
4240 s_arm_unwind_save_vfp (void)
4245 bfd_boolean partial_match
;
4247 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
,
4251 as_bad (_("expected register list"));
4252 ignore_rest_of_line ();
4256 demand_empty_rest_of_line ();
4261 op
= 0xb8 | (count
- 1);
4262 add_unwind_opcode (op
, 1);
4267 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4268 add_unwind_opcode (op
, 2);
4270 unwind
.frame_size
+= count
* 8 + 4;
4274 /* Parse a directive saving iWMMXt data registers. */
4277 s_arm_unwind_save_mmxwr (void)
4285 if (*input_line_pointer
== '{')
4286 input_line_pointer
++;
4290 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4294 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4299 as_tsktsk (_("register list not in ascending order"));
4302 if (*input_line_pointer
== '-')
4304 input_line_pointer
++;
4305 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4308 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4311 else if (reg
>= hi_reg
)
4313 as_bad (_("bad register range"));
4316 for (; reg
< hi_reg
; reg
++)
4320 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4322 skip_past_char (&input_line_pointer
, '}');
4324 demand_empty_rest_of_line ();
4326 /* Generate any deferred opcodes because we're going to be looking at
4328 flush_pending_unwind ();
4330 for (i
= 0; i
< 16; i
++)
4332 if (mask
& (1 << i
))
4333 unwind
.frame_size
+= 8;
4336 /* Attempt to combine with a previous opcode. We do this because gcc
4337 likes to output separate unwind directives for a single block of
4339 if (unwind
.opcode_count
> 0)
4341 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4342 if ((i
& 0xf8) == 0xc0)
4345 /* Only merge if the blocks are contiguous. */
4348 if ((mask
& 0xfe00) == (1 << 9))
4350 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4351 unwind
.opcode_count
--;
4354 else if (i
== 6 && unwind
.opcode_count
>= 2)
4356 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4360 op
= 0xffff << (reg
- 1);
4362 && ((mask
& op
) == (1u << (reg
- 1))))
4364 op
= (1 << (reg
+ i
+ 1)) - 1;
4365 op
&= ~((1 << reg
) - 1);
4367 unwind
.opcode_count
-= 2;
4374 /* We want to generate opcodes in the order the registers have been
4375 saved, ie. descending order. */
4376 for (reg
= 15; reg
>= -1; reg
--)
4378 /* Save registers in blocks. */
4380 || !(mask
& (1 << reg
)))
4382 /* We found an unsaved reg. Generate opcodes to save the
4389 op
= 0xc0 | (hi_reg
- 10);
4390 add_unwind_opcode (op
, 1);
4395 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4396 add_unwind_opcode (op
, 2);
4405 ignore_rest_of_line ();
4409 s_arm_unwind_save_mmxwcg (void)
4416 if (*input_line_pointer
== '{')
4417 input_line_pointer
++;
4419 skip_whitespace (input_line_pointer
);
4423 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4427 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4433 as_tsktsk (_("register list not in ascending order"));
4436 if (*input_line_pointer
== '-')
4438 input_line_pointer
++;
4439 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4442 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4445 else if (reg
>= hi_reg
)
4447 as_bad (_("bad register range"));
4450 for (; reg
< hi_reg
; reg
++)
4454 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4456 skip_past_char (&input_line_pointer
, '}');
4458 demand_empty_rest_of_line ();
4460 /* Generate any deferred opcodes because we're going to be looking at
4462 flush_pending_unwind ();
4464 for (reg
= 0; reg
< 16; reg
++)
4466 if (mask
& (1 << reg
))
4467 unwind
.frame_size
+= 4;
4470 add_unwind_opcode (op
, 2);
4473 ignore_rest_of_line ();
4477 /* Parse an unwind_save directive.
4478 If the argument is non-zero, this is a .vsave directive. */
4481 s_arm_unwind_save (int arch_v6
)
4484 struct reg_entry
*reg
;
4485 bfd_boolean had_brace
= FALSE
;
4487 if (!unwind
.proc_start
)
4488 as_bad (MISSING_FNSTART
);
4490 /* Figure out what sort of save we have. */
4491 peek
= input_line_pointer
;
4499 reg
= arm_reg_parse_multi (&peek
);
4503 as_bad (_("register expected"));
4504 ignore_rest_of_line ();
4513 as_bad (_("FPA .unwind_save does not take a register list"));
4514 ignore_rest_of_line ();
4517 input_line_pointer
= peek
;
4518 s_arm_unwind_save_fpa (reg
->number
);
4522 s_arm_unwind_save_core ();
4527 s_arm_unwind_save_vfp_armv6 ();
4529 s_arm_unwind_save_vfp ();
4532 case REG_TYPE_MMXWR
:
4533 s_arm_unwind_save_mmxwr ();
4536 case REG_TYPE_MMXWCG
:
4537 s_arm_unwind_save_mmxwcg ();
4541 as_bad (_(".unwind_save does not support this kind of register"));
4542 ignore_rest_of_line ();
4547 /* Parse an unwind_movsp directive. */
4550 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4556 if (!unwind
.proc_start
)
4557 as_bad (MISSING_FNSTART
);
4559 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4562 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4563 ignore_rest_of_line ();
4567 /* Optional constant. */
4568 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4570 if (immediate_for_directive (&offset
) == FAIL
)
4576 demand_empty_rest_of_line ();
4578 if (reg
== REG_SP
|| reg
== REG_PC
)
4580 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4584 if (unwind
.fp_reg
!= REG_SP
)
4585 as_bad (_("unexpected .unwind_movsp directive"));
4587 /* Generate opcode to restore the value. */
4589 add_unwind_opcode (op
, 1);
4591 /* Record the information for later. */
4592 unwind
.fp_reg
= reg
;
4593 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4594 unwind
.sp_restored
= 1;
4597 /* Parse an unwind_pad directive. */
4600 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4604 if (!unwind
.proc_start
)
4605 as_bad (MISSING_FNSTART
);
4607 if (immediate_for_directive (&offset
) == FAIL
)
4612 as_bad (_("stack increment must be multiple of 4"));
4613 ignore_rest_of_line ();
4617 /* Don't generate any opcodes, just record the details for later. */
4618 unwind
.frame_size
+= offset
;
4619 unwind
.pending_offset
+= offset
;
4621 demand_empty_rest_of_line ();
4624 /* Parse an unwind_setfp directive. */
4627 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4633 if (!unwind
.proc_start
)
4634 as_bad (MISSING_FNSTART
);
4636 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4637 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4640 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4642 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4644 as_bad (_("expected <reg>, <reg>"));
4645 ignore_rest_of_line ();
4649 /* Optional constant. */
4650 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4652 if (immediate_for_directive (&offset
) == FAIL
)
4658 demand_empty_rest_of_line ();
4660 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4662 as_bad (_("register must be either sp or set by a previous"
4663 "unwind_movsp directive"));
4667 /* Don't generate any opcodes, just record the information for later. */
4668 unwind
.fp_reg
= fp_reg
;
4670 if (sp_reg
== REG_SP
)
4671 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4673 unwind
.fp_offset
-= offset
;
4676 /* Parse an unwind_raw directive. */
4679 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4682 /* This is an arbitrary limit. */
4683 unsigned char op
[16];
4686 if (!unwind
.proc_start
)
4687 as_bad (MISSING_FNSTART
);
4690 if (exp
.X_op
== O_constant
4691 && skip_past_comma (&input_line_pointer
) != FAIL
)
4693 unwind
.frame_size
+= exp
.X_add_number
;
4697 exp
.X_op
= O_illegal
;
4699 if (exp
.X_op
!= O_constant
)
4701 as_bad (_("expected <offset>, <opcode>"));
4702 ignore_rest_of_line ();
4708 /* Parse the opcode. */
4713 as_bad (_("unwind opcode too long"));
4714 ignore_rest_of_line ();
4716 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4718 as_bad (_("invalid unwind opcode"));
4719 ignore_rest_of_line ();
4722 op
[count
++] = exp
.X_add_number
;
4724 /* Parse the next byte. */
4725 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4731 /* Add the opcode bytes in reverse order. */
4733 add_unwind_opcode (op
[count
], 1);
4735 demand_empty_rest_of_line ();
4739 /* Parse a .eabi_attribute directive. */
4742 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4744 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4746 if (tag
>= 0 && tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4747 attributes_set_explicitly
[tag
] = 1;
4750 /* Emit a tls fix for the symbol. */
4753 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4757 #ifdef md_flush_pending_output
4758 md_flush_pending_output ();
4761 #ifdef md_cons_align
4765 /* Since we're just labelling the code, there's no need to define a
4768 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4769 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4770 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4771 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4773 #endif /* OBJ_ELF */
4775 static void s_arm_arch (int);
4776 static void s_arm_object_arch (int);
4777 static void s_arm_cpu (int);
4778 static void s_arm_fpu (int);
4779 static void s_arm_arch_extension (int);
4784 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4791 if (exp
.X_op
== O_symbol
)
4792 exp
.X_op
= O_secrel
;
4794 emit_expr (&exp
, 4);
4796 while (*input_line_pointer
++ == ',');
4798 input_line_pointer
--;
4799 demand_empty_rest_of_line ();
4803 /* This table describes all the machine specific pseudo-ops the assembler
4804 has to support. The fields are:
4805 pseudo-op name without dot
4806 function to call to execute this pseudo-op
4807 Integer arg to pass to the function. */
4809 const pseudo_typeS md_pseudo_table
[] =
4811 /* Never called because '.req' does not start a line. */
4812 { "req", s_req
, 0 },
4813 /* Following two are likewise never called. */
4816 { "unreq", s_unreq
, 0 },
4817 { "bss", s_bss
, 0 },
4818 { "align", s_align_ptwo
, 2 },
4819 { "arm", s_arm
, 0 },
4820 { "thumb", s_thumb
, 0 },
4821 { "code", s_code
, 0 },
4822 { "force_thumb", s_force_thumb
, 0 },
4823 { "thumb_func", s_thumb_func
, 0 },
4824 { "thumb_set", s_thumb_set
, 0 },
4825 { "even", s_even
, 0 },
4826 { "ltorg", s_ltorg
, 0 },
4827 { "pool", s_ltorg
, 0 },
4828 { "syntax", s_syntax
, 0 },
4829 { "cpu", s_arm_cpu
, 0 },
4830 { "arch", s_arm_arch
, 0 },
4831 { "object_arch", s_arm_object_arch
, 0 },
4832 { "fpu", s_arm_fpu
, 0 },
4833 { "arch_extension", s_arm_arch_extension
, 0 },
4835 { "word", s_arm_elf_cons
, 4 },
4836 { "long", s_arm_elf_cons
, 4 },
4837 { "inst.n", s_arm_elf_inst
, 2 },
4838 { "inst.w", s_arm_elf_inst
, 4 },
4839 { "inst", s_arm_elf_inst
, 0 },
4840 { "rel31", s_arm_rel31
, 0 },
4841 { "fnstart", s_arm_unwind_fnstart
, 0 },
4842 { "fnend", s_arm_unwind_fnend
, 0 },
4843 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4844 { "personality", s_arm_unwind_personality
, 0 },
4845 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4846 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4847 { "save", s_arm_unwind_save
, 0 },
4848 { "vsave", s_arm_unwind_save
, 1 },
4849 { "movsp", s_arm_unwind_movsp
, 0 },
4850 { "pad", s_arm_unwind_pad
, 0 },
4851 { "setfp", s_arm_unwind_setfp
, 0 },
4852 { "unwind_raw", s_arm_unwind_raw
, 0 },
4853 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4854 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4858 /* These are used for dwarf. */
4862 /* These are used for dwarf2. */
4863 { "file", dwarf2_directive_file
, 0 },
4864 { "loc", dwarf2_directive_loc
, 0 },
4865 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4867 { "extend", float_cons
, 'x' },
4868 { "ldouble", float_cons
, 'x' },
4869 { "packed", float_cons
, 'p' },
4871 {"secrel32", pe_directive_secrel
, 0},
4874 /* These are for compatibility with CodeComposer Studio. */
4875 {"ref", s_ccs_ref
, 0},
4876 {"def", s_ccs_def
, 0},
4877 {"asmfunc", s_ccs_asmfunc
, 0},
4878 {"endasmfunc", s_ccs_endasmfunc
, 0},
4883 /* Parser functions used exclusively in instruction operands. */
4885 /* Generic immediate-value read function for use in insn parsing.
4886 STR points to the beginning of the immediate (the leading #);
4887 VAL receives the value; if the value is outside [MIN, MAX]
4888 issue an error. PREFIX_OPT is true if the immediate prefix is
4892 parse_immediate (char **str
, int *val
, int min
, int max
,
4893 bfd_boolean prefix_opt
)
4897 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4898 if (exp
.X_op
!= O_constant
)
4900 inst
.error
= _("constant expression required");
4904 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4906 inst
.error
= _("immediate value out of range");
4910 *val
= exp
.X_add_number
;
4914 /* Less-generic immediate-value read function with the possibility of loading a
4915 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4916 instructions. Puts the result directly in inst.operands[i]. */
4919 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4920 bfd_boolean allow_symbol_p
)
4923 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4926 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4928 if (exp_p
->X_op
== O_constant
)
4930 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4931 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4932 O_constant. We have to be careful not to break compilation for
4933 32-bit X_add_number, though. */
4934 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4936 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4937 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4939 inst
.operands
[i
].regisimm
= 1;
4942 else if (exp_p
->X_op
== O_big
4943 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4945 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4947 /* Bignums have their least significant bits in
4948 generic_bignum[0]. Make sure we put 32 bits in imm and
4949 32 bits in reg, in a (hopefully) portable way. */
4950 gas_assert (parts
!= 0);
4952 /* Make sure that the number is not too big.
4953 PR 11972: Bignums can now be sign-extended to the
4954 size of a .octa so check that the out of range bits
4955 are all zero or all one. */
4956 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4958 LITTLENUM_TYPE m
= -1;
4960 if (generic_bignum
[parts
* 2] != 0
4961 && generic_bignum
[parts
* 2] != m
)
4964 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4965 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4969 inst
.operands
[i
].imm
= 0;
4970 for (j
= 0; j
< parts
; j
++, idx
++)
4971 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4972 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4973 inst
.operands
[i
].reg
= 0;
4974 for (j
= 0; j
< parts
; j
++, idx
++)
4975 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4976 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4977 inst
.operands
[i
].regisimm
= 1;
4979 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4987 /* Returns the pseudo-register number of an FPA immediate constant,
4988 or FAIL if there isn't a valid constant here. */
4991 parse_fpa_immediate (char ** str
)
4993 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4999 /* First try and match exact strings, this is to guarantee
5000 that some formats will work even for cross assembly. */
5002 for (i
= 0; fp_const
[i
]; i
++)
5004 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
5008 *str
+= strlen (fp_const
[i
]);
5009 if (is_end_of_line
[(unsigned char) **str
])
5015 /* Just because we didn't get a match doesn't mean that the constant
5016 isn't valid, just that it is in a format that we don't
5017 automatically recognize. Try parsing it with the standard
5018 expression routines. */
5020 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
5022 /* Look for a raw floating point number. */
5023 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
5024 && is_end_of_line
[(unsigned char) *save_in
])
5026 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5028 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5030 if (words
[j
] != fp_values
[i
][j
])
5034 if (j
== MAX_LITTLENUMS
)
5042 /* Try and parse a more complex expression, this will probably fail
5043 unless the code uses a floating point prefix (eg "0f"). */
5044 save_in
= input_line_pointer
;
5045 input_line_pointer
= *str
;
5046 if (expression (&exp
) == absolute_section
5047 && exp
.X_op
== O_big
5048 && exp
.X_add_number
< 0)
5050 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
5052 #define X_PRECISION 5
5053 #define E_PRECISION 15L
5054 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
5056 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5058 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5060 if (words
[j
] != fp_values
[i
][j
])
5064 if (j
== MAX_LITTLENUMS
)
5066 *str
= input_line_pointer
;
5067 input_line_pointer
= save_in
;
5074 *str
= input_line_pointer
;
5075 input_line_pointer
= save_in
;
5076 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. the ARM 8-bit
   immediate encoding for VFP/Neon float constants.
   (Reconstructed from extraction-mangled text; logic unchanged.)  */

static int
is_quarter_float (unsigned imm)
{
  /* Exponent field must be all-ones-but-top or all-zeros-but-top,
     depending on bit 29 (the 'B' bit, inverted MSB of the exponent).  */
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
5091 /* Detect the presence of a floating point or integer zero constant,
5095 parse_ifimm_zero (char **in
)
5099 if (!is_immediate_prefix (**in
))
5101 /* In unified syntax, all prefixes are optional. */
5102 if (!unified_syntax
)
5108 /* Accept #0x0 as a synonym for #0. */
5109 if (strncmp (*in
, "0x", 2) == 0)
5112 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
5117 error_code
= atof_generic (in
, ".", EXP_CHARS
,
5118 &generic_floating_point_number
);
5121 && generic_floating_point_number
.sign
== '+'
5122 && (generic_floating_point_number
.low
5123 > generic_floating_point_number
.leader
))
5129 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5130 0baBbbbbbc defgh000 00000000 00000000.
5131 The zero and minus-zero cases need special handling, since they can't be
5132 encoded in the "quarter-precision" float format, but can nonetheless be
5133 loaded as integer constants. */
5136 parse_qfloat_immediate (char **ccp
, int *immed
)
5140 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5141 int found_fpchar
= 0;
5143 skip_past_char (&str
, '#');
5145 /* We must not accidentally parse an integer as a floating-point number. Make
5146 sure that the value we parse is not an integer by checking for special
5147 characters '.' or 'e'.
5148 FIXME: This is a horrible hack, but doing better is tricky because type
5149 information isn't in a very usable state at parse time. */
5151 skip_whitespace (fpnum
);
5153 if (strncmp (fpnum
, "0x", 2) == 0)
5157 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5158 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5168 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5170 unsigned fpword
= 0;
5173 /* Our FP word must be 32 bits (single-precision FP). */
5174 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5176 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5180 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5193 /* Shift operands. */
5196 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5199 struct asm_shift_name
5202 enum shift_kind kind
;
5205 /* Third argument to parse_shift. */
5206 enum parse_shift_mode
5208 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5209 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5210 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5211 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5212 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5215 /* Parse a <shift> specifier on an ARM data processing instruction.
5216 This has three forms:
5218 (LSL|LSR|ASL|ASR|ROR) Rs
5219 (LSL|LSR|ASL|ASR|ROR) #imm
5222 Note that ASL is assimilated to LSL in the instruction encoding, and
5223 RRX to ROR #0 (which cannot be written as such). */
5226 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5228 const struct asm_shift_name
*shift_name
;
5229 enum shift_kind shift
;
5234 for (p
= *str
; ISALPHA (*p
); p
++)
5239 inst
.error
= _("shift expression expected");
5243 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5246 if (shift_name
== NULL
)
5248 inst
.error
= _("shift expression expected");
5252 shift
= shift_name
->kind
;
5256 case NO_SHIFT_RESTRICT
:
5257 case SHIFT_IMMEDIATE
: break;
5259 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5260 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5262 inst
.error
= _("'LSL' or 'ASR' required");
5267 case SHIFT_LSL_IMMEDIATE
:
5268 if (shift
!= SHIFT_LSL
)
5270 inst
.error
= _("'LSL' required");
5275 case SHIFT_ASR_IMMEDIATE
:
5276 if (shift
!= SHIFT_ASR
)
5278 inst
.error
= _("'ASR' required");
5286 if (shift
!= SHIFT_RRX
)
5288 /* Whitespace can appear here if the next thing is a bare digit. */
5289 skip_whitespace (p
);
5291 if (mode
== NO_SHIFT_RESTRICT
5292 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5294 inst
.operands
[i
].imm
= reg
;
5295 inst
.operands
[i
].immisreg
= 1;
5297 else if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5300 inst
.operands
[i
].shift_kind
= shift
;
5301 inst
.operands
[i
].shifted
= 1;
5306 /* Parse a <shifter_operand> for an ARM data processing instruction:
5309 #<immediate>, <rotate>
5313 where <shift> is defined by parse_shift above, and <rotate> is a
5314 multiple of 2 between 0 and 30. Validation of immediate operands
5315 is deferred to md_apply_fix. */
5318 parse_shifter_operand (char **str
, int i
)
5323 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5325 inst
.operands
[i
].reg
= value
;
5326 inst
.operands
[i
].isreg
= 1;
5328 /* parse_shift will override this if appropriate */
5329 inst
.relocs
[0].exp
.X_op
= O_constant
;
5330 inst
.relocs
[0].exp
.X_add_number
= 0;
5332 if (skip_past_comma (str
) == FAIL
)
5335 /* Shift operation on register. */
5336 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5339 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_IMM_PREFIX
))
5342 if (skip_past_comma (str
) == SUCCESS
)
5344 /* #x, y -- ie explicit rotation by Y. */
5345 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5348 if (exp
.X_op
!= O_constant
|| inst
.relocs
[0].exp
.X_op
!= O_constant
)
5350 inst
.error
= _("constant expression expected");
5354 value
= exp
.X_add_number
;
5355 if (value
< 0 || value
> 30 || value
% 2 != 0)
5357 inst
.error
= _("invalid rotation");
5360 if (inst
.relocs
[0].exp
.X_add_number
< 0
5361 || inst
.relocs
[0].exp
.X_add_number
> 255)
5363 inst
.error
= _("invalid constant");
5367 /* Encode as specified. */
5368 inst
.operands
[i
].imm
= inst
.relocs
[0].exp
.X_add_number
| value
<< 7;
5372 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
5373 inst
.relocs
[0].pc_rel
= 0;
5377 /* Group relocation information. Each entry in the table contains the
5378 textual name of the relocation as may appear in assembler source
5379 and must end with a colon.
5380 Along with this textual name are the relocation codes to be used if
5381 the corresponding instruction is an ALU instruction (ADD or SUB only),
5382 an LDR, an LDRS, or an LDC. */
5384 struct group_reloc_table_entry
5395 /* Varieties of non-ALU group relocation. */
5402 static struct group_reloc_table_entry group_reloc_table
[] =
5403 { /* Program counter relative: */
5405 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5410 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5411 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5412 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5413 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5415 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5420 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5421 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5422 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5423 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5425 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5426 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5427 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5428 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5429 /* Section base relative */
5431 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5436 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5437 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5438 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5439 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5441 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5446 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5447 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5448 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5449 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5451 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5452 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5453 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5454 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5455 /* Absolute thumb alu relocations. */
5457 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5462 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5467 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5472 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5477 /* Given the address of a pointer pointing to the textual name of a group
5478 relocation as may appear in assembler source, attempt to find its details
5479 in group_reloc_table. The pointer will be updated to the character after
5480 the trailing colon. On failure, FAIL will be returned; SUCCESS
5481 otherwise. On success, *entry will be updated to point at the relevant
5482 group_reloc_table entry. */
5485 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5488 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5490 int length
= strlen (group_reloc_table
[i
].name
);
5492 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5493 && (*str
)[length
] == ':')
5495 *out
= &group_reloc_table
[i
];
5496 *str
+= (length
+ 1);
5504 /* Parse a <shifter_operand> for an ARM data processing instruction
5505 (as for parse_shifter_operand) where group relocations are allowed:
5508 #<immediate>, <rotate>
5509 #:<group_reloc>:<expression>
5513 where <group_reloc> is one of the strings defined in group_reloc_table.
5514 The hashes are optional.
5516 Everything else is as for parse_shifter_operand. */
5518 static parse_operand_result
5519 parse_shifter_operand_group_reloc (char **str
, int i
)
5521 /* Determine if we have the sequence of characters #: or just :
5522 coming next. If we do, then we check for a group relocation.
5523 If we don't, punt the whole lot to parse_shifter_operand. */
5525 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5526 || (*str
)[0] == ':')
5528 struct group_reloc_table_entry
*entry
;
5530 if ((*str
)[0] == '#')
5535 /* Try to parse a group relocation. Anything else is an error. */
5536 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5538 inst
.error
= _("unknown group relocation");
5539 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5542 /* We now have the group relocation table entry corresponding to
5543 the name in the assembler source. Next, we parse the expression. */
5544 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_NO_PREFIX
))
5545 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5547 /* Record the relocation type (always the ALU variant here). */
5548 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5549 gas_assert (inst
.relocs
[0].type
!= 0);
5551 return PARSE_OPERAND_SUCCESS
;
5554 return parse_shifter_operand (str
, i
) == SUCCESS
5555 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5557 /* Never reached. */
5560 /* Parse a Neon alignment expression. Information is written to
5561 inst.operands[i]. We assume the initial ':' has been skipped.
5563 align .imm = align << 8, .immisalign=1, .preind=0 */
5564 static parse_operand_result
5565 parse_neon_alignment (char **str
, int i
)
5570 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5572 if (exp
.X_op
!= O_constant
)
5574 inst
.error
= _("alignment must be constant");
5575 return PARSE_OPERAND_FAIL
;
5578 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5579 inst
.operands
[i
].immisalign
= 1;
5580 /* Alignments are not pre-indexes. */
5581 inst
.operands
[i
].preind
= 0;
5584 return PARSE_OPERAND_SUCCESS
;
5587 /* Parse all forms of an ARM address expression. Information is written
5588 to inst.operands[i] and/or inst.relocs[0].
5590 Preindexed addressing (.preind=1):
5592 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5593 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5594 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5595 .shift_kind=shift .relocs[0].exp=shift_imm
5597 These three may have a trailing ! which causes .writeback to be set also.
5599 Postindexed addressing (.postind=1, .writeback=1):
5601 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5602 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5603 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5604 .shift_kind=shift .relocs[0].exp=shift_imm
5606 Unindexed addressing (.preind=0, .postind=0):
5608 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5612 [Rn]{!} shorthand for [Rn,#0]{!}
5613 =immediate .isreg=0 .relocs[0].exp=immediate
5614 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5616 It is the caller's responsibility to check for addressing modes not
5617 supported by the instruction, and to set inst.relocs[0].type. */
5619 static parse_operand_result
5620 parse_address_main (char **str
, int i
, int group_relocations
,
5621 group_reloc_type group_type
)
5626 if (skip_past_char (&p
, '[') == FAIL
)
5628 if (skip_past_char (&p
, '=') == FAIL
)
5630 /* Bare address - translate to PC-relative offset. */
5631 inst
.relocs
[0].pc_rel
= 1;
5632 inst
.operands
[i
].reg
= REG_PC
;
5633 inst
.operands
[i
].isreg
= 1;
5634 inst
.operands
[i
].preind
= 1;
5636 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_OPT_PREFIX_BIG
))
5637 return PARSE_OPERAND_FAIL
;
5639 else if (parse_big_immediate (&p
, i
, &inst
.relocs
[0].exp
,
5640 /*allow_symbol_p=*/TRUE
))
5641 return PARSE_OPERAND_FAIL
;
5644 return PARSE_OPERAND_SUCCESS
;
5647 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5648 skip_whitespace (p
);
5650 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5652 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5653 return PARSE_OPERAND_FAIL
;
5655 inst
.operands
[i
].reg
= reg
;
5656 inst
.operands
[i
].isreg
= 1;
5658 if (skip_past_comma (&p
) == SUCCESS
)
5660 inst
.operands
[i
].preind
= 1;
5663 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5665 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5667 inst
.operands
[i
].imm
= reg
;
5668 inst
.operands
[i
].immisreg
= 1;
5670 if (skip_past_comma (&p
) == SUCCESS
)
5671 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5672 return PARSE_OPERAND_FAIL
;
5674 else if (skip_past_char (&p
, ':') == SUCCESS
)
5676 /* FIXME: '@' should be used here, but it's filtered out by generic
5677 code before we get to see it here. This may be subject to
5679 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5681 if (result
!= PARSE_OPERAND_SUCCESS
)
5686 if (inst
.operands
[i
].negative
)
5688 inst
.operands
[i
].negative
= 0;
5692 if (group_relocations
5693 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5695 struct group_reloc_table_entry
*entry
;
5697 /* Skip over the #: or : sequence. */
5703 /* Try to parse a group relocation. Anything else is an
5705 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5707 inst
.error
= _("unknown group relocation");
5708 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5711 /* We now have the group relocation table entry corresponding to
5712 the name in the assembler source. Next, we parse the
5714 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5715 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5717 /* Record the relocation type. */
5722 = (bfd_reloc_code_real_type
) entry
->ldr_code
;
5727 = (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5732 = (bfd_reloc_code_real_type
) entry
->ldc_code
;
5739 if (inst
.relocs
[0].type
== 0)
5741 inst
.error
= _("this group relocation is not allowed on this instruction");
5742 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5749 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5750 return PARSE_OPERAND_FAIL
;
5751 /* If the offset is 0, find out if it's a +0 or -0. */
5752 if (inst
.relocs
[0].exp
.X_op
== O_constant
5753 && inst
.relocs
[0].exp
.X_add_number
== 0)
5755 skip_whitespace (q
);
5759 skip_whitespace (q
);
5762 inst
.operands
[i
].negative
= 1;
5767 else if (skip_past_char (&p
, ':') == SUCCESS
)
5769 /* FIXME: '@' should be used here, but it's filtered out by generic code
5770 before we get to see it here. This may be subject to change. */
5771 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5773 if (result
!= PARSE_OPERAND_SUCCESS
)
5777 if (skip_past_char (&p
, ']') == FAIL
)
5779 inst
.error
= _("']' expected");
5780 return PARSE_OPERAND_FAIL
;
5783 if (skip_past_char (&p
, '!') == SUCCESS
)
5784 inst
.operands
[i
].writeback
= 1;
5786 else if (skip_past_comma (&p
) == SUCCESS
)
5788 if (skip_past_char (&p
, '{') == SUCCESS
)
5790 /* [Rn], {expr} - unindexed, with option */
5791 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5792 0, 255, TRUE
) == FAIL
)
5793 return PARSE_OPERAND_FAIL
;
5795 if (skip_past_char (&p
, '}') == FAIL
)
5797 inst
.error
= _("'}' expected at end of 'option' field");
5798 return PARSE_OPERAND_FAIL
;
5800 if (inst
.operands
[i
].preind
)
5802 inst
.error
= _("cannot combine index with option");
5803 return PARSE_OPERAND_FAIL
;
5806 return PARSE_OPERAND_SUCCESS
;
5810 inst
.operands
[i
].postind
= 1;
5811 inst
.operands
[i
].writeback
= 1;
5813 if (inst
.operands
[i
].preind
)
5815 inst
.error
= _("cannot combine pre- and post-indexing");
5816 return PARSE_OPERAND_FAIL
;
5820 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5822 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5824 /* We might be using the immediate for alignment already. If we
5825 are, OR the register number into the low-order bits. */
5826 if (inst
.operands
[i
].immisalign
)
5827 inst
.operands
[i
].imm
|= reg
;
5829 inst
.operands
[i
].imm
= reg
;
5830 inst
.operands
[i
].immisreg
= 1;
5832 if (skip_past_comma (&p
) == SUCCESS
)
5833 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5834 return PARSE_OPERAND_FAIL
;
5840 if (inst
.operands
[i
].negative
)
5842 inst
.operands
[i
].negative
= 0;
5845 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5846 return PARSE_OPERAND_FAIL
;
5847 /* If the offset is 0, find out if it's a +0 or -0. */
5848 if (inst
.relocs
[0].exp
.X_op
== O_constant
5849 && inst
.relocs
[0].exp
.X_add_number
== 0)
5851 skip_whitespace (q
);
5855 skip_whitespace (q
);
5858 inst
.operands
[i
].negative
= 1;
5864 /* If at this point neither .preind nor .postind is set, we have a
5865 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5866 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5868 inst
.operands
[i
].preind
= 1;
5869 inst
.relocs
[0].exp
.X_op
= O_constant
;
5870 inst
.relocs
[0].exp
.X_add_number
= 0;
5873 return PARSE_OPERAND_SUCCESS
;
5877 parse_address (char **str
, int i
)
5879 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5883 static parse_operand_result
5884 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5886 return parse_address_main (str
, i
, 1, type
);
5889 /* Parse an operand for a MOVW or MOVT instruction. */
5891 parse_half (char **str
)
5896 skip_past_char (&p
, '#');
5897 if (strncasecmp (p
, ":lower16:", 9) == 0)
5898 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVW
;
5899 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5900 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVT
;
5902 if (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
)
5905 skip_whitespace (p
);
5908 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5911 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
5913 if (inst
.relocs
[0].exp
.X_op
!= O_constant
)
5915 inst
.error
= _("constant expression expected");
5918 if (inst
.relocs
[0].exp
.X_add_number
< 0
5919 || inst
.relocs
[0].exp
.X_add_number
> 0xffff)
5921 inst
.error
= _("immediate value out of range");
5929 /* Miscellaneous. */
5931 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5932 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5934 parse_psr (char **str
, bfd_boolean lhs
)
5937 unsigned long psr_field
;
5938 const struct asm_psr
*psr
;
5940 bfd_boolean is_apsr
= FALSE
;
5941 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5943 /* PR gas/12698: If the user has specified -march=all then m_profile will
5944 be TRUE, but we want to ignore it in this case as we are building for any
5945 CPU type, including non-m variants. */
5946 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5949 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5950 feature for ease of use and backwards compatibility. */
5952 if (strncasecmp (p
, "SPSR", 4) == 0)
5955 goto unsupported_psr
;
5957 psr_field
= SPSR_BIT
;
5959 else if (strncasecmp (p
, "CPSR", 4) == 0)
5962 goto unsupported_psr
;
5966 else if (strncasecmp (p
, "APSR", 4) == 0)
5968 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5969 and ARMv7-R architecture CPUs. */
5978 while (ISALNUM (*p
) || *p
== '_');
5980 if (strncasecmp (start
, "iapsr", 5) == 0
5981 || strncasecmp (start
, "eapsr", 5) == 0
5982 || strncasecmp (start
, "xpsr", 4) == 0
5983 || strncasecmp (start
, "psr", 3) == 0)
5984 p
= start
+ strcspn (start
, "rR") + 1;
5986 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5992 /* If APSR is being written, a bitfield may be specified. Note that
5993 APSR itself is handled above. */
5994 if (psr
->field
<= 3)
5996 psr_field
= psr
->field
;
6002 /* M-profile MSR instructions have the mask field set to "10", except
6003 *PSR variants which modify APSR, which may use a different mask (and
6004 have been handled already). Do that by setting the PSR_f field
6006 return psr
->field
| (lhs
? PSR_f
: 0);
6009 goto unsupported_psr
;
6015 /* A suffix follows. */
6021 while (ISALNUM (*p
) || *p
== '_');
6025 /* APSR uses a notation for bits, rather than fields. */
6026 unsigned int nzcvq_bits
= 0;
6027 unsigned int g_bit
= 0;
6030 for (bit
= start
; bit
!= p
; bit
++)
6032 switch (TOLOWER (*bit
))
6035 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
6039 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
6043 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
6047 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
6051 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
6055 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
6059 inst
.error
= _("unexpected bit specified after APSR");
6064 if (nzcvq_bits
== 0x1f)
6069 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
6071 inst
.error
= _("selected processor does not "
6072 "support DSP extension");
6079 if ((nzcvq_bits
& 0x20) != 0
6080 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
6081 || (g_bit
& 0x2) != 0)
6083 inst
.error
= _("bad bitmask specified after APSR");
6089 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
6094 psr_field
|= psr
->field
;
6100 goto error
; /* Garbage after "[CS]PSR". */
6102 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
6103 is deprecated, but allow it anyway. */
6107 as_tsktsk (_("writing to APSR without specifying a bitmask is "
6110 else if (!m_profile
)
6111 /* These bits are never right for M-profile devices: don't set them
6112 (only code paths which read/write APSR reach here). */
6113 psr_field
|= (PSR_c
| PSR_f
);
6119 inst
.error
= _("selected processor does not support requested special "
6120 "purpose register");
6124 inst
.error
= _("flag for {c}psr instruction expected");
6129 parse_sys_vldr_vstr (char **str
)
6138 {"FPSCR", 0x1, 0x0},
6139 {"FPSCR_nzcvqc", 0x2, 0x0},
6142 {"FPCXTNS", 0x6, 0x1},
6143 {"FPCXTS", 0x7, 0x1}
6145 char *op_end
= strchr (*str
, ',');
6146 size_t op_strlen
= op_end
- *str
;
6148 for (i
= 0; i
< sizeof (sysregs
) / sizeof (sysregs
[0]); i
++)
6150 if (!strncmp (*str
, sysregs
[i
].name
, op_strlen
))
6152 val
= sysregs
[i
].regl
| (sysregs
[i
].regh
<< 3);
6161 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6162 value suitable for splatting into the AIF field of the instruction. */
6165 parse_cps_flags (char **str
)
6174 case '\0': case ',':
6177 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6178 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6179 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6182 inst
.error
= _("unrecognized CPS flag");
6187 if (saw_a_flag
== 0)
6189 inst
.error
= _("missing CPS flags");
6197 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6198 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6201 parse_endian_specifier (char **str
)
6206 if (strncasecmp (s
, "BE", 2))
6208 else if (strncasecmp (s
, "LE", 2))
6212 inst
.error
= _("valid endian specifiers are be or le");
6216 if (ISALNUM (s
[2]) || s
[2] == '_')
6218 inst
.error
= _("valid endian specifiers are be or le");
6223 return little_endian
;
6226 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6227 value suitable for poking into the rotate field of an sxt or sxta
6228 instruction, or FAIL on error. */
6231 parse_ror (char **str
)
6236 if (strncasecmp (s
, "ROR", 3) == 0)
6240 inst
.error
= _("missing rotation field after comma");
6244 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6249 case 0: *str
= s
; return 0x0;
6250 case 8: *str
= s
; return 0x1;
6251 case 16: *str
= s
; return 0x2;
6252 case 24: *str
= s
; return 0x3;
6255 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6260 /* Parse a conditional code (from conds[] below). The value returned is in the
6261 range 0 .. 14, or FAIL. */
6263 parse_cond (char **str
)
6266 const struct asm_cond
*c
;
6268 /* Condition codes are always 2 characters, so matching up to
6269 3 characters is sufficient. */
6274 while (ISALPHA (*q
) && n
< 3)
6276 cond
[n
] = TOLOWER (*q
);
6281 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6284 inst
.error
= _("condition required");
6292 /* Record a use of the given feature. */
6294 record_feature_use (const arm_feature_set
*feature
)
6297 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6299 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6302 /* If the given feature is currently allowed, mark it as used and return TRUE.
6303 Return FALSE otherwise. */
6305 mark_feature_used (const arm_feature_set
*feature
)
6307 /* Ensure the option is currently allowed. */
6308 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6311 /* Add the appropriate architecture feature for the barrier option used. */
6312 record_feature_use (feature
);
6317 /* Parse an option for a barrier instruction. Returns the encoding for the
6320 parse_barrier (char **str
)
6323 const struct asm_barrier_opt
*o
;
6326 while (ISALPHA (*q
))
6329 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6334 if (!mark_feature_used (&o
->arch
))
6341 /* Parse the operands of a table branch instruction. Similar to a memory
6344 parse_tb (char **str
)
6349 if (skip_past_char (&p
, '[') == FAIL
)
6351 inst
.error
= _("'[' expected");
6355 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6357 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6360 inst
.operands
[0].reg
= reg
;
6362 if (skip_past_comma (&p
) == FAIL
)
6364 inst
.error
= _("',' expected");
6368 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6370 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6373 inst
.operands
[0].imm
= reg
;
6375 if (skip_past_comma (&p
) == SUCCESS
)
6377 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6379 if (inst
.relocs
[0].exp
.X_add_number
!= 1)
6381 inst
.error
= _("invalid shift");
6384 inst
.operands
[0].shifted
= 1;
6387 if (skip_past_char (&p
, ']') == FAIL
)
6389 inst
.error
= _("']' expected");
6396 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6397 information on the types the operands can take and how they are encoded.
6398 Up to four operands may be read; this function handles setting the
6399 ".present" field for each read operand itself.
6400 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6401 else returns FAIL. */
6404 parse_neon_mov (char **str
, int *which_operand
)
6406 int i
= *which_operand
, val
;
6407 enum arm_reg_type rtype
;
6409 struct neon_type_el optype
;
6411 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6413 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6414 inst
.operands
[i
].reg
= val
;
6415 inst
.operands
[i
].isscalar
= 1;
6416 inst
.operands
[i
].vectype
= optype
;
6417 inst
.operands
[i
++].present
= 1;
6419 if (skip_past_comma (&ptr
) == FAIL
)
6422 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6425 inst
.operands
[i
].reg
= val
;
6426 inst
.operands
[i
].isreg
= 1;
6427 inst
.operands
[i
].present
= 1;
6429 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6432 /* Cases 0, 1, 2, 3, 5 (D only). */
6433 if (skip_past_comma (&ptr
) == FAIL
)
6436 inst
.operands
[i
].reg
= val
;
6437 inst
.operands
[i
].isreg
= 1;
6438 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6439 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6440 inst
.operands
[i
].isvec
= 1;
6441 inst
.operands
[i
].vectype
= optype
;
6442 inst
.operands
[i
++].present
= 1;
6444 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6446 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6447 Case 13: VMOV <Sd>, <Rm> */
6448 inst
.operands
[i
].reg
= val
;
6449 inst
.operands
[i
].isreg
= 1;
6450 inst
.operands
[i
].present
= 1;
6452 if (rtype
== REG_TYPE_NQ
)
6454 first_error (_("can't use Neon quad register here"));
6457 else if (rtype
!= REG_TYPE_VFS
)
6460 if (skip_past_comma (&ptr
) == FAIL
)
6462 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6464 inst
.operands
[i
].reg
= val
;
6465 inst
.operands
[i
].isreg
= 1;
6466 inst
.operands
[i
].present
= 1;
6469 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6472 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6473 Case 1: VMOV<c><q> <Dd>, <Dm>
6474 Case 8: VMOV.F32 <Sd>, <Sm>
6475 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6477 inst
.operands
[i
].reg
= val
;
6478 inst
.operands
[i
].isreg
= 1;
6479 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6480 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6481 inst
.operands
[i
].isvec
= 1;
6482 inst
.operands
[i
].vectype
= optype
;
6483 inst
.operands
[i
].present
= 1;
6485 if (skip_past_comma (&ptr
) == SUCCESS
)
6490 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6493 inst
.operands
[i
].reg
= val
;
6494 inst
.operands
[i
].isreg
= 1;
6495 inst
.operands
[i
++].present
= 1;
6497 if (skip_past_comma (&ptr
) == FAIL
)
6500 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6503 inst
.operands
[i
].reg
= val
;
6504 inst
.operands
[i
].isreg
= 1;
6505 inst
.operands
[i
].present
= 1;
6508 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6509 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6510 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6511 Case 10: VMOV.F32 <Sd>, #<imm>
6512 Case 11: VMOV.F64 <Dd>, #<imm> */
6513 inst
.operands
[i
].immisfloat
= 1;
6514 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6516 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6517 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6521 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6525 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6528 inst
.operands
[i
].reg
= val
;
6529 inst
.operands
[i
].isreg
= 1;
6530 inst
.operands
[i
++].present
= 1;
6532 if (skip_past_comma (&ptr
) == FAIL
)
6535 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6537 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6538 inst
.operands
[i
].reg
= val
;
6539 inst
.operands
[i
].isscalar
= 1;
6540 inst
.operands
[i
].present
= 1;
6541 inst
.operands
[i
].vectype
= optype
;
6543 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6545 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6546 inst
.operands
[i
].reg
= val
;
6547 inst
.operands
[i
].isreg
= 1;
6548 inst
.operands
[i
++].present
= 1;
6550 if (skip_past_comma (&ptr
) == FAIL
)
6553 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6556 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6560 inst
.operands
[i
].reg
= val
;
6561 inst
.operands
[i
].isreg
= 1;
6562 inst
.operands
[i
].isvec
= 1;
6563 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6564 inst
.operands
[i
].vectype
= optype
;
6565 inst
.operands
[i
].present
= 1;
6567 if (rtype
== REG_TYPE_VFS
)
6571 if (skip_past_comma (&ptr
) == FAIL
)
6573 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6576 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6579 inst
.operands
[i
].reg
= val
;
6580 inst
.operands
[i
].isreg
= 1;
6581 inst
.operands
[i
].isvec
= 1;
6582 inst
.operands
[i
].issingle
= 1;
6583 inst
.operands
[i
].vectype
= optype
;
6584 inst
.operands
[i
].present
= 1;
6587 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6591 inst
.operands
[i
].reg
= val
;
6592 inst
.operands
[i
].isreg
= 1;
6593 inst
.operands
[i
].isvec
= 1;
6594 inst
.operands
[i
].issingle
= 1;
6595 inst
.operands
[i
].vectype
= optype
;
6596 inst
.operands
[i
].present
= 1;
6601 first_error (_("parse error"));
6605 /* Successfully parsed the operands. Update args. */
6611 first_error (_("expected comma"));
6615 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
6619 /* Use this macro when the operand constraints are different
6620 for ARM and THUMB (e.g. ldrd). */
6621 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6622 ((arm_operand) | ((thumb_operand) << 16))
6624 /* Matcher codes for parse_operands. */
6625 enum operand_parse_code
6627 OP_stop
, /* end of line */
6629 OP_RR
, /* ARM register */
6630 OP_RRnpc
, /* ARM register, not r15 */
6631 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6632 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6633 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6634 optional trailing ! */
6635 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6636 OP_RCP
, /* Coprocessor number */
6637 OP_RCN
, /* Coprocessor register */
6638 OP_RF
, /* FPA register */
6639 OP_RVS
, /* VFP single precision register */
6640 OP_RVD
, /* VFP double precision register (0..15) */
6641 OP_RND
, /* Neon double precision register (0..31) */
6642 OP_RNQ
, /* Neon quad precision register */
6643 OP_RVSD
, /* VFP single or double precision register */
6644 OP_RNSD
, /* Neon single or double precision register */
6645 OP_RNDQ
, /* Neon double or quad precision register */
6646 OP_RNSDQ
, /* Neon single, double or quad precision register */
6647 OP_RNSC
, /* Neon scalar D[X] */
6648 OP_RVC
, /* VFP control register */
6649 OP_RMF
, /* Maverick F register */
6650 OP_RMD
, /* Maverick D register */
6651 OP_RMFX
, /* Maverick FX register */
6652 OP_RMDX
, /* Maverick DX register */
6653 OP_RMAX
, /* Maverick AX register */
6654 OP_RMDS
, /* Maverick DSPSC register */
6655 OP_RIWR
, /* iWMMXt wR register */
6656 OP_RIWC
, /* iWMMXt wC register */
6657 OP_RIWG
, /* iWMMXt wCG register */
6658 OP_RXA
, /* XScale accumulator register */
6660 /* New operands for Armv8.1-M Mainline. */
6661 OP_LR
, /* ARM LR register */
6662 OP_RRnpcsp_I32
, /* ARM register (no BadReg) or literal 1 .. 32 */
6664 OP_REGLST
, /* ARM register list */
6665 OP_CLRMLST
, /* CLRM register list */
6666 OP_VRSLST
, /* VFP single-precision register list */
6667 OP_VRDLST
, /* VFP double-precision register list */
6668 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6669 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6670 OP_NSTRLST
, /* Neon element/structure list */
6671 OP_VRSDVLST
, /* VFP single or double-precision register list and VPR */
6673 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6674 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6675 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6676 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6677 OP_RNSD_RNSC
, /* Neon S or D reg, or Neon scalar. */
6678 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6679 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6680 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6681 OP_VMOV
, /* Neon VMOV operands. */
6682 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6683 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6684 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6685 OP_VLDR
, /* VLDR operand. */
6687 OP_I0
, /* immediate zero */
6688 OP_I7
, /* immediate value 0 .. 7 */
6689 OP_I15
, /* 0 .. 15 */
6690 OP_I16
, /* 1 .. 16 */
6691 OP_I16z
, /* 0 .. 16 */
6692 OP_I31
, /* 0 .. 31 */
6693 OP_I31w
, /* 0 .. 31, optional trailing ! */
6694 OP_I32
, /* 1 .. 32 */
6695 OP_I32z
, /* 0 .. 32 */
6696 OP_I63
, /* 0 .. 63 */
6697 OP_I63s
, /* -64 .. 63 */
6698 OP_I64
, /* 1 .. 64 */
6699 OP_I64z
, /* 0 .. 64 */
6700 OP_I255
, /* 0 .. 255 */
6702 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6703 OP_I7b
, /* 0 .. 7 */
6704 OP_I15b
, /* 0 .. 15 */
6705 OP_I31b
, /* 0 .. 31 */
6707 OP_SH
, /* shifter operand */
6708 OP_SHG
, /* shifter operand with possible group relocation */
6709 OP_ADDR
, /* Memory address expression (any mode) */
6710 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6711 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6712 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6713 OP_EXP
, /* arbitrary expression */
6714 OP_EXPi
, /* same, with optional immediate prefix */
6715 OP_EXPr
, /* same, with optional relocation suffix */
6716 OP_EXPs
, /* same, with optional non-first operand relocation suffix */
6717 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6718 OP_IROT1
, /* VCADD rotate immediate: 90, 270. */
6719 OP_IROT2
, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6721 OP_CPSF
, /* CPS flags */
6722 OP_ENDI
, /* Endianness specifier */
6723 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6724 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6725 OP_COND
, /* conditional code */
6726 OP_TB
, /* Table branch. */
6728 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6730 OP_RRnpc_I0
, /* ARM register or literal 0 */
6731 OP_RR_EXr
, /* ARM register or expression with opt. reloc stuff. */
6732 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6733 OP_RF_IF
, /* FPA register or immediate */
6734 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6735 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6737 /* Optional operands. */
6738 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6739 OP_oI31b
, /* 0 .. 31 */
6740 OP_oI32b
, /* 1 .. 32 */
6741 OP_oI32z
, /* 0 .. 32 */
6742 OP_oIffffb
, /* 0 .. 65535 */
6743 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6745 OP_oRR
, /* ARM register */
6746 OP_oLR
, /* ARM LR register */
6747 OP_oRRnpc
, /* ARM register, not the PC */
6748 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6749 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6750 OP_oRND
, /* Optional Neon double precision register */
6751 OP_oRNQ
, /* Optional Neon quad precision register */
6752 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6753 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6754 OP_oSHll
, /* LSL immediate */
6755 OP_oSHar
, /* ASR immediate */
6756 OP_oSHllar
, /* LSL or ASR immediate */
6757 OP_oROR
, /* ROR 0/8/16/24 */
6758 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6760 /* Some pre-defined mixed (ARM/THUMB) operands. */
6761 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6762 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6763 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6765 OP_FIRST_OPTIONAL
= OP_oI7b
6768 /* Generic instruction operand parser. This does no encoding and no
6769 semantic validation; it merely squirrels values away in the inst
6770 structure. Returns SUCCESS or FAIL depending on whether the
6771 specified grammar matched. */
6773 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6775 unsigned const int *upat
= pattern
;
6776 char *backtrack_pos
= 0;
6777 const char *backtrack_error
= 0;
6778 int i
, val
= 0, backtrack_index
= 0;
6779 enum arm_reg_type rtype
;
6780 parse_operand_result result
;
6781 unsigned int op_parse_code
;
6782 bfd_boolean partial_match
;
6784 #define po_char_or_fail(chr) \
6787 if (skip_past_char (&str, chr) == FAIL) \
6792 #define po_reg_or_fail(regtype) \
6795 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6796 & inst.operands[i].vectype); \
6799 first_error (_(reg_expected_msgs[regtype])); \
6802 inst.operands[i].reg = val; \
6803 inst.operands[i].isreg = 1; \
6804 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6805 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6806 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6807 || rtype == REG_TYPE_VFD \
6808 || rtype == REG_TYPE_NQ); \
6812 #define po_reg_or_goto(regtype, label) \
6815 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6816 & inst.operands[i].vectype); \
6820 inst.operands[i].reg = val; \
6821 inst.operands[i].isreg = 1; \
6822 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6823 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6824 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6825 || rtype == REG_TYPE_VFD \
6826 || rtype == REG_TYPE_NQ); \
6830 #define po_imm_or_fail(min, max, popt) \
6833 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6835 inst.operands[i].imm = val; \
6839 #define po_scalar_or_goto(elsz, label) \
6842 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6845 inst.operands[i].reg = val; \
6846 inst.operands[i].isscalar = 1; \
6850 #define po_misc_or_fail(expr) \
6858 #define po_misc_or_fail_no_backtrack(expr) \
6862 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6863 backtrack_pos = 0; \
6864 if (result != PARSE_OPERAND_SUCCESS) \
6869 #define po_barrier_or_imm(str) \
6872 val = parse_barrier (&str); \
6873 if (val == FAIL && ! ISALPHA (*str)) \
6876 /* ISB can only take SY as an option. */ \
6877 || ((inst.instruction & 0xf0) == 0x60 \
6880 inst.error = _("invalid barrier type"); \
6881 backtrack_pos = 0; \
6887 skip_whitespace (str
);
6889 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6891 op_parse_code
= upat
[i
];
6892 if (op_parse_code
>= 1<<16)
6893 op_parse_code
= thumb
? (op_parse_code
>> 16)
6894 : (op_parse_code
& ((1<<16)-1));
6896 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6898 /* Remember where we are in case we need to backtrack. */
6899 gas_assert (!backtrack_pos
);
6900 backtrack_pos
= str
;
6901 backtrack_error
= inst
.error
;
6902 backtrack_index
= i
;
6905 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6906 po_char_or_fail (',');
6908 switch (op_parse_code
)
6918 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6919 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6920 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6921 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6922 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6923 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6925 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6927 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6929 /* Also accept generic coprocessor regs for unknown registers. */
6931 po_reg_or_fail (REG_TYPE_CN
);
6933 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6934 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6935 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6936 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6937 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6938 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6939 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6940 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6941 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6942 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6944 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6945 case OP_RNSD
: po_reg_or_fail (REG_TYPE_NSD
); break;
6947 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6948 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6950 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6952 /* Neon scalar. Using an element size of 8 means that some invalid
6953 scalars are accepted here, so deal with those in later code. */
6954 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6958 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6961 po_imm_or_fail (0, 0, TRUE
);
6966 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6971 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6974 if (parse_ifimm_zero (&str
))
6975 inst
.operands
[i
].imm
= 0;
6979 = _("only floating point zero is allowed as immediate value");
6987 po_scalar_or_goto (8, try_rr
);
6990 po_reg_or_fail (REG_TYPE_RN
);
6996 po_scalar_or_goto (8, try_nsdq
);
6999 po_reg_or_fail (REG_TYPE_NSDQ
);
7005 po_scalar_or_goto (8, try_s_scalar
);
7008 po_scalar_or_goto (4, try_nsd
);
7011 po_reg_or_fail (REG_TYPE_NSD
);
7017 po_scalar_or_goto (8, try_ndq
);
7020 po_reg_or_fail (REG_TYPE_NDQ
);
7026 po_scalar_or_goto (8, try_vfd
);
7029 po_reg_or_fail (REG_TYPE_VFD
);
7034 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7035 not careful then bad things might happen. */
7036 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
7041 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
7044 /* There's a possibility of getting a 64-bit immediate here, so
7045 we need special handling. */
7046 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
7049 inst
.error
= _("immediate value is out of range");
7057 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
7060 po_imm_or_fail (0, 63, TRUE
);
7065 po_char_or_fail ('[');
7066 po_reg_or_fail (REG_TYPE_RN
);
7067 po_char_or_fail (']');
7073 po_reg_or_fail (REG_TYPE_RN
);
7074 if (skip_past_char (&str
, '!') == SUCCESS
)
7075 inst
.operands
[i
].writeback
= 1;
7079 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
7080 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
7081 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
7082 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
7083 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
7084 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
7085 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
7086 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
7087 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
7088 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
7089 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
7090 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
7092 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
7094 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
7095 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
7097 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
7098 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
7099 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
7100 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
7102 /* Immediate variants */
7104 po_char_or_fail ('{');
7105 po_imm_or_fail (0, 255, TRUE
);
7106 po_char_or_fail ('}');
7110 /* The expression parser chokes on a trailing !, so we have
7111 to find it first and zap it. */
7114 while (*s
&& *s
!= ',')
7119 inst
.operands
[i
].writeback
= 1;
7121 po_imm_or_fail (0, 31, TRUE
);
7129 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7134 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7139 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7141 if (inst
.relocs
[0].exp
.X_op
== O_symbol
)
7143 val
= parse_reloc (&str
);
7146 inst
.error
= _("unrecognized relocation suffix");
7149 else if (val
!= BFD_RELOC_UNUSED
)
7151 inst
.operands
[i
].imm
= val
;
7152 inst
.operands
[i
].hasreloc
= 1;
7158 po_misc_or_fail (my_get_expression (&inst
.relocs
[i
].exp
, &str
,
7160 if (inst
.relocs
[i
].exp
.X_op
== O_symbol
)
7162 inst
.operands
[i
].hasreloc
= 1;
7164 else if (inst
.relocs
[i
].exp
.X_op
== O_constant
)
7166 inst
.operands
[i
].imm
= inst
.relocs
[i
].exp
.X_add_number
;
7167 inst
.operands
[i
].hasreloc
= 0;
7171 /* Operand for MOVW or MOVT. */
7173 po_misc_or_fail (parse_half (&str
));
7176 /* Register or expression. */
7177 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
7178 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
7180 /* Register or immediate. */
7181 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
7182 I0
: po_imm_or_fail (0, 0, FALSE
); break;
7184 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
7186 if (!is_immediate_prefix (*str
))
7189 val
= parse_fpa_immediate (&str
);
7192 /* FPA immediates are encoded as registers 8-15.
7193 parse_fpa_immediate has already applied the offset. */
7194 inst
.operands
[i
].reg
= val
;
7195 inst
.operands
[i
].isreg
= 1;
7198 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
7199 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
7201 /* Two kinds of register. */
7204 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7206 || (rege
->type
!= REG_TYPE_MMXWR
7207 && rege
->type
!= REG_TYPE_MMXWC
7208 && rege
->type
!= REG_TYPE_MMXWCG
))
7210 inst
.error
= _("iWMMXt data or control register expected");
7213 inst
.operands
[i
].reg
= rege
->number
;
7214 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7220 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7222 || (rege
->type
!= REG_TYPE_MMXWC
7223 && rege
->type
!= REG_TYPE_MMXWCG
))
7225 inst
.error
= _("iWMMXt control register expected");
7228 inst
.operands
[i
].reg
= rege
->number
;
7229 inst
.operands
[i
].isreg
= 1;
7234 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7235 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7236 case OP_oROR
: val
= parse_ror (&str
); break;
7237 case OP_COND
: val
= parse_cond (&str
); break;
7238 case OP_oBARRIER_I15
:
7239 po_barrier_or_imm (str
); break;
7241 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7247 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7248 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7250 inst
.error
= _("Banked registers are not available with this "
7256 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7260 po_reg_or_goto (REG_TYPE_VFSD
, try_sysreg
);
7263 val
= parse_sys_vldr_vstr (&str
);
7267 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7270 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7272 if (strncasecmp (str
, "APSR_", 5) == 0)
7279 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7280 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7281 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7282 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7283 default: found
= 16;
7287 inst
.operands
[i
].isvec
= 1;
7288 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7289 inst
.operands
[i
].reg
= REG_PC
;
7296 po_misc_or_fail (parse_tb (&str
));
7299 /* Register lists. */
7301 val
= parse_reg_list (&str
, REGLIST_RN
);
7304 inst
.operands
[i
].writeback
= 1;
7310 val
= parse_reg_list (&str
, REGLIST_CLRM
);
7314 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
,
7319 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
,
7324 /* Allow Q registers too. */
7325 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7326 REGLIST_NEON_D
, &partial_match
);
7330 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7331 REGLIST_VFP_S
, &partial_match
);
7332 inst
.operands
[i
].issingle
= 1;
7337 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7338 REGLIST_VFP_D_VPR
, &partial_match
);
7339 if (val
== FAIL
&& !partial_match
)
7342 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7343 REGLIST_VFP_S_VPR
, &partial_match
);
7344 inst
.operands
[i
].issingle
= 1;
7349 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7350 REGLIST_NEON_D
, &partial_match
);
7354 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7355 &inst
.operands
[i
].vectype
);
7358 /* Addressing modes */
7360 po_misc_or_fail (parse_address (&str
, i
));
7364 po_misc_or_fail_no_backtrack (
7365 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7369 po_misc_or_fail_no_backtrack (
7370 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7374 po_misc_or_fail_no_backtrack (
7375 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7379 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7383 po_misc_or_fail_no_backtrack (
7384 parse_shifter_operand_group_reloc (&str
, i
));
7388 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7392 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7396 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7400 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7403 /* Various value-based sanity checks and shared operations. We
7404 do not signal immediate failures for the register constraints;
7405 this allows a syntax error to take precedence. */
7406 switch (op_parse_code
)
7414 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7415 inst
.error
= BAD_PC
;
7420 if (inst
.operands
[i
].isreg
)
7422 if (inst
.operands
[i
].reg
== REG_PC
)
7423 inst
.error
= BAD_PC
;
7424 else if (inst
.operands
[i
].reg
== REG_SP
7425 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7426 relaxed since ARMv8-A. */
7427 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
7430 inst
.error
= BAD_SP
;
7436 if (inst
.operands
[i
].isreg
7437 && inst
.operands
[i
].reg
== REG_PC
7438 && (inst
.operands
[i
].writeback
|| thumb
))
7439 inst
.error
= BAD_PC
;
7443 if (inst
.operands
[i
].isreg
)
7452 case OP_oBARRIER_I15
:
7463 inst
.operands
[i
].imm
= val
;
7468 if (inst
.operands
[i
].reg
!= REG_LR
)
7469 inst
.error
= _("operand must be LR register");
7476 /* If we get here, this operand was successfully parsed. */
7477 inst
.operands
[i
].present
= 1;
7481 inst
.error
= BAD_ARGS
;
7486 /* The parse routine should already have set inst.error, but set a
7487 default here just in case. */
7489 inst
.error
= _("syntax error");
7493 /* Do not backtrack over a trailing optional argument that
7494 absorbed some text. We will only fail again, with the
7495 'garbage following instruction' error message, which is
7496 probably less helpful than the current one. */
7497 if (backtrack_index
== i
&& backtrack_pos
!= str
7498 && upat
[i
+1] == OP_stop
)
7501 inst
.error
= _("syntax error");
7505 /* Try again, skipping the optional argument at backtrack_pos. */
7506 str
= backtrack_pos
;
7507 inst
.error
= backtrack_error
;
7508 inst
.operands
[backtrack_index
].present
= 0;
7509 i
= backtrack_index
;
7513 /* Check that we have parsed all the arguments. */
7514 if (*str
!= '\0' && !inst
.error
)
7515 inst
.error
= _("garbage following instruction");
7517 return inst
.error
? FAIL
: SUCCESS
;
7520 #undef po_char_or_fail
7521 #undef po_reg_or_fail
7522 #undef po_reg_or_goto
7523 #undef po_imm_or_fail
7524 #undef po_scalar_or_fail
7525 #undef po_barrier_or_imm
7527 /* Shorthand macro for instruction encoding functions issuing errors. */
7528 #define constraint(expr, err) \
7539 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7540 instructions are unpredictable if these registers are used. This
7541 is the BadReg predicate in ARM's Thumb-2 documentation.
7543 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7544 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7545 #define reject_bad_reg(reg) \
7547 if (reg == REG_PC) \
7549 inst.error = BAD_PC; \
7552 else if (reg == REG_SP \
7553 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7555 inst.error = BAD_SP; \
7560 /* If REG is R13 (the stack pointer), warn that its use is
7562 #define warn_deprecated_sp(reg) \
7564 if (warn_on_deprecated && reg == REG_SP) \
7565 as_tsktsk (_("use of r13 is deprecated")); \
7568 /* Functions for operand encoding. ARM, then Thumb. */
7570 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7572 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7574 The only binary encoding difference is the Coprocessor number. Coprocessor
7575 9 is used for half-precision calculations or conversions. The format of the
7576 instruction is the same as the equivalent Coprocessor 10 instruction that
7577 exists for Single-Precision operation. */
7580 do_scalar_fp16_v82_encode (void)
7582 if (inst
.cond
!= COND_ALWAYS
)
7583 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7584 " the behaviour is UNPREDICTABLE"));
7585 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7588 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7589 mark_feature_used (&arm_ext_fp16
);
7592 /* If VAL can be encoded in the immediate field of an ARM instruction,
7593 return the encoded form. Otherwise, return FAIL. */
7596 encode_arm_immediate (unsigned int val
)
7603 for (i
= 2; i
< 32; i
+= 2)
7604 if ((a
= rotate_left (val
, i
)) <= 0xff)
7605 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7610 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7611 return the encoded form. Otherwise, return FAIL. */
7613 encode_thumb32_immediate (unsigned int val
)
7620 for (i
= 1; i
<= 24; i
++)
7623 if ((val
& ~(0xff << i
)) == 0)
7624 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7628 if (val
== ((a
<< 16) | a
))
7630 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7634 if (val
== ((a
<< 16) | a
))
7635 return 0x200 | (a
>> 8);
7639 /* Encode a VFP SP or DP register number into inst.instruction. */
7642 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7644 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7647 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7650 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7653 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7658 first_error (_("D register out of range for selected VFP version"));
7666 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7670 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7674 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7678 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7682 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7686 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7694 /* Encode a <shift> in an ARM-format instruction. The immediate,
7695 if any, is handled by md_apply_fix. */
7697 encode_arm_shift (int i
)
7699 /* register-shifted register. */
7700 if (inst
.operands
[i
].immisreg
)
7703 for (op_index
= 0; op_index
<= i
; ++op_index
)
7705 /* Check the operand only when it's presented. In pre-UAL syntax,
7706 if the destination register is the same as the first operand, two
7707 register form of the instruction can be used. */
7708 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
7709 && inst
.operands
[op_index
].reg
== REG_PC
)
7710 as_warn (UNPRED_REG ("r15"));
7713 if (inst
.operands
[i
].imm
== REG_PC
)
7714 as_warn (UNPRED_REG ("r15"));
7717 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7718 inst
.instruction
|= SHIFT_ROR
<< 5;
7721 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7722 if (inst
.operands
[i
].immisreg
)
7724 inst
.instruction
|= SHIFT_BY_REG
;
7725 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7728 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7733 encode_arm_shifter_operand (int i
)
7735 if (inst
.operands
[i
].isreg
)
7737 inst
.instruction
|= inst
.operands
[i
].reg
;
7738 encode_arm_shift (i
);
7742 inst
.instruction
|= INST_IMMEDIATE
;
7743 if (inst
.relocs
[0].type
!= BFD_RELOC_ARM_IMMEDIATE
)
7744 inst
.instruction
|= inst
.operands
[i
].imm
;
7748 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7750 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7753 Generate an error if the operand is not a register. */
7754 constraint (!inst
.operands
[i
].isreg
,
7755 _("Instruction does not support =N addresses"));
7757 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7759 if (inst
.operands
[i
].preind
)
7763 inst
.error
= _("instruction does not accept preindexed addressing");
7766 inst
.instruction
|= PRE_INDEX
;
7767 if (inst
.operands
[i
].writeback
)
7768 inst
.instruction
|= WRITE_BACK
;
7771 else if (inst
.operands
[i
].postind
)
7773 gas_assert (inst
.operands
[i
].writeback
);
7775 inst
.instruction
|= WRITE_BACK
;
7777 else /* unindexed - only for coprocessor */
7779 inst
.error
= _("instruction does not accept unindexed addressing");
7783 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7784 && (((inst
.instruction
& 0x000f0000) >> 16)
7785 == ((inst
.instruction
& 0x0000f000) >> 12)))
7786 as_warn ((inst
.instruction
& LOAD_BIT
)
7787 ? _("destination register same as write-back base")
7788 : _("source register same as write-back base"));
7791 /* inst.operands[i] was set up by parse_address. Encode it into an
7792 ARM-format mode 2 load or store instruction. If is_t is true,
7793 reject forms that cannot be used with a T instruction (i.e. not
7796 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7798 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7800 encode_arm_addr_mode_common (i
, is_t
);
7802 if (inst
.operands
[i
].immisreg
)
7804 constraint ((inst
.operands
[i
].imm
== REG_PC
7805 || (is_pc
&& inst
.operands
[i
].writeback
)),
7807 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7808 inst
.instruction
|= inst
.operands
[i
].imm
;
7809 if (!inst
.operands
[i
].negative
)
7810 inst
.instruction
|= INDEX_UP
;
7811 if (inst
.operands
[i
].shifted
)
7813 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7814 inst
.instruction
|= SHIFT_ROR
<< 5;
7817 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7818 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7822 else /* immediate offset in inst.relocs[0] */
7824 if (is_pc
&& !inst
.relocs
[0].pc_rel
)
7826 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7828 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7829 cannot use PC in addressing.
7830 PC cannot be used in writeback addressing, either. */
7831 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7834 /* Use of PC in str is deprecated for ARMv7. */
7835 if (warn_on_deprecated
7837 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7838 as_tsktsk (_("use of PC in this instruction is deprecated"));
7841 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
7843 /* Prefer + for zero encoded value. */
7844 if (!inst
.operands
[i
].negative
)
7845 inst
.instruction
|= INDEX_UP
;
7846 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM
;
7851 /* inst.operands[i] was set up by parse_address. Encode it into an
7852 ARM-format mode 3 load or store instruction. Reject forms that
7853 cannot be used with such instructions. If is_t is true, reject
7854 forms that cannot be used with a T instruction (i.e. not
7857 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7859 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7861 inst
.error
= _("instruction does not accept scaled register index");
7865 encode_arm_addr_mode_common (i
, is_t
);
7867 if (inst
.operands
[i
].immisreg
)
7869 constraint ((inst
.operands
[i
].imm
== REG_PC
7870 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7872 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7874 inst
.instruction
|= inst
.operands
[i
].imm
;
7875 if (!inst
.operands
[i
].negative
)
7876 inst
.instruction
|= INDEX_UP
;
7878 else /* immediate offset in inst.relocs[0] */
7880 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.relocs
[0].pc_rel
7881 && inst
.operands
[i
].writeback
),
7883 inst
.instruction
|= HWOFFSET_IMM
;
7884 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
7886 /* Prefer + for zero encoded value. */
7887 if (!inst
.operands
[i
].negative
)
7888 inst
.instruction
|= INDEX_UP
;
7890 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7895 /* Write immediate bits [7:0] to the following locations:
7897 |28/24|23 19|18 16|15 4|3 0|
7898 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7900 This function is used by VMOV/VMVN/VORR/VBIC. */
7903 neon_write_immbits (unsigned immbits
)
7905 inst
.instruction
|= immbits
& 0xf;
7906 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7907 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
/* Invert the low-order SIZE bits of the 64-bit quantity XHI:XLO in
   place.  Either pointer may be null, in which case that half is read
   as zero and never written back.  A SIZE of 8, 16 or 32 affects only
   the low word; a SIZE of 64 inverts both words.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  if (size == 8)
    lo = (~lo) & 0xff;
  else if (size == 16)
    lo = (~lo) & 0xffff;
  else if (size == 32)
    lo = (~lo) & 0xffffffff;
  else if (size == 64)
    {
      /* For 64-bit elements both halves carry immediate bits.  */
      hi = (~hi) & 0xffffffff;
      lo = (~lo) & 0xffffffff;
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD, i.e. each
   byte of IMM is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (8 * byte)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
/* For an immediate of the above form, collapse each byte to its low
   bit and return the resulting 4-bit value 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  /* Bit 8*N of IMM becomes bit N of the result.  */
  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (8 * byte)) & 1) << byte;

  return result;
}
/* Compress a quarter-float representation (held in the top bits of an
   IEEE single-precision pattern) to 0b...000 abcdefgh: the sign moves
   to bit 7, bits 25:19 supply the low seven bits.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_bits = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return sign_bit | low_bits;
}
7976 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7977 the instruction. *OP is passed as the initial value of the op field, and
7978 may be set to a different value depending on the constant (i.e.
7979 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7980 MVN). If the immediate looks like a repeated pattern then also
7981 try smaller element sizes. */
7984 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7985 unsigned *immbits
, int *op
, int size
,
7986 enum neon_el_type type
)
7988 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7990 if (type
== NT_float
&& !float_p
)
7993 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7995 if (size
!= 32 || *op
== 1)
7997 *immbits
= neon_qfloat_bits (immlo
);
8003 if (neon_bits_same_in_bytes (immhi
)
8004 && neon_bits_same_in_bytes (immlo
))
8008 *immbits
= (neon_squash_bits (immhi
) << 4)
8009 | neon_squash_bits (immlo
);
8020 if (immlo
== (immlo
& 0x000000ff))
8025 else if (immlo
== (immlo
& 0x0000ff00))
8027 *immbits
= immlo
>> 8;
8030 else if (immlo
== (immlo
& 0x00ff0000))
8032 *immbits
= immlo
>> 16;
8035 else if (immlo
== (immlo
& 0xff000000))
8037 *immbits
= immlo
>> 24;
8040 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
8042 *immbits
= (immlo
>> 8) & 0xff;
8045 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
8047 *immbits
= (immlo
>> 16) & 0xff;
8051 if ((immlo
& 0xffff) != (immlo
>> 16))
8058 if (immlo
== (immlo
& 0x000000ff))
8063 else if (immlo
== (immlo
& 0x0000ff00))
8065 *immbits
= immlo
>> 8;
8069 if ((immlo
& 0xff) != (immlo
>> 8))
8074 if (immlo
== (immlo
& 0x000000ff))
8076 /* Don't allow MVN with 8-bit immediate. */
8086 #if defined BFD_HOST_64_BIT
8087 /* Returns TRUE if double precision value V may be cast
8088 to single precision without loss of accuracy. */
8091 is_double_a_single (bfd_int64_t v
)
8093 int exp
= (int)((v
>> 52) & 0x7FF);
8094 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8096 return (exp
== 0 || exp
== 0x7FF
8097 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
8098 && (mantissa
& 0x1FFFFFFFl
) == 0;
8101 /* Returns a double precision value casted to single precision
8102 (ignoring the least significant bits in exponent and mantissa). */
8105 double_to_single (bfd_int64_t v
)
8107 int sign
= (int) ((v
>> 63) & 1l);
8108 int exp
= (int) ((v
>> 52) & 0x7FF);
8109 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8115 exp
= exp
- 1023 + 127;
8124 /* No denormalized numbers. */
8130 return (sign
<< 31) | (exp
<< 23) | mantissa
;
8132 #endif /* BFD_HOST_64_BIT */
8141 static void do_vfp_nsyn_opcode (const char *);
8143 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8144 Determine whether it can be performed with a move instruction; if
8145 it can, convert inst.instruction to that move instruction and
8146 return TRUE; if it can't, convert inst.instruction to a literal-pool
8147 load and return FALSE. If this is not a valid thing to do in the
8148 current context, set inst.error and return TRUE.
8150 inst.operands[i] describes the destination register. */
8153 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
8156 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
8157 bfd_boolean arm_p
= (t
== CONST_ARM
);
8160 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
8164 if ((inst
.instruction
& tbit
) == 0)
8166 inst
.error
= _("invalid pseudo operation");
8170 if (inst
.relocs
[0].exp
.X_op
!= O_constant
8171 && inst
.relocs
[0].exp
.X_op
!= O_symbol
8172 && inst
.relocs
[0].exp
.X_op
!= O_big
)
8174 inst
.error
= _("constant expression expected");
8178 if (inst
.relocs
[0].exp
.X_op
== O_constant
8179 || inst
.relocs
[0].exp
.X_op
== O_big
)
8181 #if defined BFD_HOST_64_BIT
8186 if (inst
.relocs
[0].exp
.X_op
== O_big
)
8188 LITTLENUM_TYPE w
[X_PRECISION
];
8191 if (inst
.relocs
[0].exp
.X_add_number
== -1)
8193 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
8195 /* FIXME: Should we check words w[2..5] ? */
8200 #if defined BFD_HOST_64_BIT
8202 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
8203 << LITTLENUM_NUMBER_OF_BITS
)
8204 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
8205 << LITTLENUM_NUMBER_OF_BITS
)
8206 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
8207 << LITTLENUM_NUMBER_OF_BITS
)
8208 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
8210 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
8211 | (l
[0] & LITTLENUM_MASK
);
8215 v
= inst
.relocs
[0].exp
.X_add_number
;
8217 if (!inst
.operands
[i
].issingle
)
8221 /* LDR should not use lead in a flag-setting instruction being
8222 chosen so we do not check whether movs can be used. */
8224 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
8225 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8226 && inst
.operands
[i
].reg
!= 13
8227 && inst
.operands
[i
].reg
!= 15)
8229 /* Check if on thumb2 it can be done with a mov.w, mvn or
8230 movw instruction. */
8231 unsigned int newimm
;
8232 bfd_boolean isNegated
;
8234 newimm
= encode_thumb32_immediate (v
);
8235 if (newimm
!= (unsigned int) FAIL
)
8239 newimm
= encode_thumb32_immediate (~v
);
8240 if (newimm
!= (unsigned int) FAIL
)
8244 /* The number can be loaded with a mov.w or mvn
8246 if (newimm
!= (unsigned int) FAIL
8247 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8249 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8250 | (inst
.operands
[i
].reg
<< 8));
8251 /* Change to MOVN. */
8252 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8253 inst
.instruction
|= (newimm
& 0x800) << 15;
8254 inst
.instruction
|= (newimm
& 0x700) << 4;
8255 inst
.instruction
|= (newimm
& 0x0ff);
8258 /* The number can be loaded with a movw instruction. */
8259 else if ((v
& ~0xFFFF) == 0
8260 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8262 int imm
= v
& 0xFFFF;
8264 inst
.instruction
= 0xf2400000; /* MOVW. */
8265 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8266 inst
.instruction
|= (imm
& 0xf000) << 4;
8267 inst
.instruction
|= (imm
& 0x0800) << 15;
8268 inst
.instruction
|= (imm
& 0x0700) << 4;
8269 inst
.instruction
|= (imm
& 0x00ff);
8276 int value
= encode_arm_immediate (v
);
8280 /* This can be done with a mov instruction. */
8281 inst
.instruction
&= LITERAL_MASK
;
8282 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8283 inst
.instruction
|= value
& 0xfff;
8287 value
= encode_arm_immediate (~ v
);
8290 /* This can be done with a mvn instruction. */
8291 inst
.instruction
&= LITERAL_MASK
;
8292 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8293 inst
.instruction
|= value
& 0xfff;
8297 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8300 unsigned immbits
= 0;
8301 unsigned immlo
= inst
.operands
[1].imm
;
8302 unsigned immhi
= inst
.operands
[1].regisimm
8303 ? inst
.operands
[1].reg
8304 : inst
.relocs
[0].exp
.X_unsigned
8306 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8307 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8308 &op
, 64, NT_invtype
);
8312 neon_invert_size (&immlo
, &immhi
, 64);
8314 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8315 &op
, 64, NT_invtype
);
8320 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8326 /* Fill other bits in vmov encoding for both thumb and arm. */
8328 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8330 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8331 neon_write_immbits (immbits
);
8339 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8340 if (inst
.operands
[i
].issingle
8341 && is_quarter_float (inst
.operands
[1].imm
)
8342 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8344 inst
.operands
[1].imm
=
8345 neon_qfloat_bits (v
);
8346 do_vfp_nsyn_opcode ("fconsts");
8350 /* If our host does not support a 64-bit type then we cannot perform
8351 the following optimization. This mean that there will be a
8352 discrepancy between the output produced by an assembler built for
8353 a 32-bit-only host and the output produced from a 64-bit host, but
8354 this cannot be helped. */
8355 #if defined BFD_HOST_64_BIT
8356 else if (!inst
.operands
[1].issingle
8357 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8359 if (is_double_a_single (v
)
8360 && is_quarter_float (double_to_single (v
)))
8362 inst
.operands
[1].imm
=
8363 neon_qfloat_bits (double_to_single (v
));
8364 do_vfp_nsyn_opcode ("fconstd");
8372 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8373 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8376 inst
.operands
[1].reg
= REG_PC
;
8377 inst
.operands
[1].isreg
= 1;
8378 inst
.operands
[1].preind
= 1;
8379 inst
.relocs
[0].pc_rel
= 1;
8380 inst
.relocs
[0].type
= (thumb_p
8381 ? BFD_RELOC_ARM_THUMB_OFFSET
8383 ? BFD_RELOC_ARM_HWLITERAL
8384 : BFD_RELOC_ARM_LITERAL
));
8388 /* inst.operands[i] was set up by parse_address. Encode it into an
8389 ARM-format instruction. Reject all forms which cannot be encoded
8390 into a coprocessor load/store instruction. If wb_ok is false,
8391 reject use of writeback; if unind_ok is false, reject use of
8392 unindexed addressing. If reloc_override is not 0, use it instead
8393 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8394 (in which case it is preserved). */
8397 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8399 if (!inst
.operands
[i
].isreg
)
8402 if (! inst
.operands
[0].isvec
)
8404 inst
.error
= _("invalid co-processor operand");
8407 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8411 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8413 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8415 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8417 gas_assert (!inst
.operands
[i
].writeback
);
8420 inst
.error
= _("instruction does not support unindexed addressing");
8423 inst
.instruction
|= inst
.operands
[i
].imm
;
8424 inst
.instruction
|= INDEX_UP
;
8428 if (inst
.operands
[i
].preind
)
8429 inst
.instruction
|= PRE_INDEX
;
8431 if (inst
.operands
[i
].writeback
)
8433 if (inst
.operands
[i
].reg
== REG_PC
)
8435 inst
.error
= _("pc may not be used with write-back");
8440 inst
.error
= _("instruction does not support writeback");
8443 inst
.instruction
|= WRITE_BACK
;
8447 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) reloc_override
;
8448 else if ((inst
.relocs
[0].type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8449 || inst
.relocs
[0].type
> BFD_RELOC_ARM_LDC_SB_G2
)
8450 && inst
.relocs
[0].type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8453 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8455 inst
.relocs
[0].type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8458 /* Prefer + for zero encoded value. */
8459 if (!inst
.operands
[i
].negative
)
8460 inst
.instruction
|= INDEX_UP
;
8465 /* Functions for instruction encoding, sorted by sub-architecture.
8466 First some generics; their names are taken from the conventional
8467 bit positions for register arguments in ARM format instructions. */
8477 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8483 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8489 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8490 inst
.instruction
|= inst
.operands
[1].reg
;
8496 inst
.instruction
|= inst
.operands
[0].reg
;
8497 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8503 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8504 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8510 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8511 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8517 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8518 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8522 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8524 if (ARM_CPU_IS_ANY (cpu_variant
))
8526 as_tsktsk ("%s", msg
);
8529 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8541 unsigned Rn
= inst
.operands
[2].reg
;
8542 /* Enforce restrictions on SWP instruction. */
8543 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8545 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8546 _("Rn must not overlap other operands"));
8548 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8550 if (!check_obsolete (&arm_ext_v8
,
8551 _("swp{b} use is obsoleted for ARMv8 and later"))
8552 && warn_on_deprecated
8553 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8554 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8557 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8558 inst
.instruction
|= inst
.operands
[1].reg
;
8559 inst
.instruction
|= Rn
<< 16;
8565 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8566 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8567 inst
.instruction
|= inst
.operands
[2].reg
;
8573 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8574 constraint (((inst
.relocs
[0].exp
.X_op
!= O_constant
8575 && inst
.relocs
[0].exp
.X_op
!= O_illegal
)
8576 || inst
.relocs
[0].exp
.X_add_number
!= 0),
8578 inst
.instruction
|= inst
.operands
[0].reg
;
8579 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8580 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8586 inst
.instruction
|= inst
.operands
[0].imm
;
8592 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8593 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8596 /* ARM instructions, in alphabetical order by function name (except
8597 that wrapper functions appear immediately after the function they
8600 /* This is a pseudo-op of the form "adr rd, label" to be converted
8601 into a relative address of the form "add rd, pc, #label-.-8". */
8606 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8608 /* Frag hacking will turn this into a sub instruction if the offset turns
8609 out to be negative. */
8610 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
8611 inst
.relocs
[0].pc_rel
= 1;
8612 inst
.relocs
[0].exp
.X_add_number
-= 8;
8614 if (support_interwork
8615 && inst
.relocs
[0].exp
.X_op
== O_symbol
8616 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8617 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8618 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8619 inst
.relocs
[0].exp
.X_add_number
|= 1;
8622 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8623 into a relative address of the form:
8624 add rd, pc, #low(label-.-8)"
8625 add rd, rd, #high(label-.-8)" */
8630 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8632 /* Frag hacking will turn this into a sub instruction if the offset turns
8633 out to be negative. */
8634 inst
.relocs
[0].type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8635 inst
.relocs
[0].pc_rel
= 1;
8636 inst
.size
= INSN_SIZE
* 2;
8637 inst
.relocs
[0].exp
.X_add_number
-= 8;
8639 if (support_interwork
8640 && inst
.relocs
[0].exp
.X_op
== O_symbol
8641 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8642 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8643 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8644 inst
.relocs
[0].exp
.X_add_number
|= 1;
8650 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8651 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8653 if (!inst
.operands
[1].present
)
8654 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8655 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8656 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8657 encode_arm_shifter_operand (2);
8663 if (inst
.operands
[0].present
)
8664 inst
.instruction
|= inst
.operands
[0].imm
;
8666 inst
.instruction
|= 0xf;
8672 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8673 constraint (msb
> 32, _("bit-field extends past end of register"));
8674 /* The instruction encoding stores the LSB and MSB,
8675 not the LSB and width. */
8676 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8677 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8678 inst
.instruction
|= (msb
- 1) << 16;
8686 /* #0 in second position is alternative syntax for bfc, which is
8687 the same instruction but with REG_PC in the Rm field. */
8688 if (!inst
.operands
[1].isreg
)
8689 inst
.operands
[1].reg
= REG_PC
;
8691 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8692 constraint (msb
> 32, _("bit-field extends past end of register"));
8693 /* The instruction encoding stores the LSB and MSB,
8694 not the LSB and width. */
8695 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8696 inst
.instruction
|= inst
.operands
[1].reg
;
8697 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8698 inst
.instruction
|= (msb
- 1) << 16;
8704 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8705 _("bit-field extends past end of register"));
8706 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8707 inst
.instruction
|= inst
.operands
[1].reg
;
8708 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8709 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8712 /* ARM V5 breakpoint instruction (argument parse)
8713 BKPT <16 bit unsigned immediate>
8714 Instruction is not conditional.
8715 The bit pattern given in insns[] has the COND_ALWAYS condition,
8716 and it is an error if the caller tried to override that. */
8721 /* Top 12 of 16 bits to bits 19:8. */
8722 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8724 /* Bottom 4 of 16 bits to bits 3:0. */
8725 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8729 encode_branch (int default_reloc
)
8731 if (inst
.operands
[0].hasreloc
)
8733 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8734 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8735 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8736 inst
.relocs
[0].type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8737 ? BFD_RELOC_ARM_PLT32
8738 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8741 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) default_reloc
;
8742 inst
.relocs
[0].pc_rel
= 1;
8749 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8750 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8753 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8760 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8762 if (inst
.cond
== COND_ALWAYS
)
8763 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8765 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8769 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8772 /* ARM V5 branch-link-exchange instruction (argument parse)
8773 BLX <target_addr> ie BLX(1)
8774 BLX{<condition>} <Rm> ie BLX(2)
8775 Unfortunately, there are two different opcodes for this mnemonic.
8776 So, the insns[].value is not used, and the code here zaps values
8777 into inst.instruction.
8778 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8783 if (inst
.operands
[0].isreg
)
8785 /* Arg is a register; the opcode provided by insns[] is correct.
8786 It is not illegal to do "blx pc", just useless. */
8787 if (inst
.operands
[0].reg
== REG_PC
)
8788 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8790 inst
.instruction
|= inst
.operands
[0].reg
;
8794 /* Arg is an address; this instruction cannot be executed
8795 conditionally, and the opcode must be adjusted.
8796 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8797 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8798 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8799 inst
.instruction
= 0xfa000000;
8800 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8807 bfd_boolean want_reloc
;
8809 if (inst
.operands
[0].reg
== REG_PC
)
8810 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8812 inst
.instruction
|= inst
.operands
[0].reg
;
8813 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8814 it is for ARMv4t or earlier. */
8815 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8816 if (!ARM_FEATURE_ZERO (selected_object_arch
)
8817 && !ARM_CPU_HAS_FEATURE (selected_object_arch
, arm_ext_v5
))
8821 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8826 inst
.relocs
[0].type
= BFD_RELOC_ARM_V4BX
;
8830 /* ARM v5TEJ. Jump to Jazelle code. */
8835 if (inst
.operands
[0].reg
== REG_PC
)
8836 as_tsktsk (_("use of r15 in bxj is not really useful"));
8838 inst
.instruction
|= inst
.operands
[0].reg
;
8841 /* Co-processor data operation:
8842 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8843 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8847 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8848 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8849 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8850 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8851 inst
.instruction
|= inst
.operands
[4].reg
;
8852 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8858 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8859 encode_arm_shifter_operand (1);
8862 /* Transfer between coprocessor and ARM registers.
8863 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8868 No special properties. */
8870 struct deprecated_coproc_regs_s
8877 arm_feature_set deprecated
;
8878 arm_feature_set obsoleted
;
8879 const char *dep_msg
;
8880 const char *obs_msg
;
8883 #define DEPR_ACCESS_V8 \
8884 N_("This coprocessor register access is deprecated in ARMv8")
8886 /* Table of all deprecated coprocessor registers. */
8887 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8889 {15, 0, 7, 10, 5, /* CP15DMB. */
8890 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8891 DEPR_ACCESS_V8
, NULL
},
8892 {15, 0, 7, 10, 4, /* CP15DSB. */
8893 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8894 DEPR_ACCESS_V8
, NULL
},
8895 {15, 0, 7, 5, 4, /* CP15ISB. */
8896 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8897 DEPR_ACCESS_V8
, NULL
},
8898 {14, 6, 1, 0, 0, /* TEEHBR. */
8899 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8900 DEPR_ACCESS_V8
, NULL
},
8901 {14, 6, 0, 0, 0, /* TEECR. */
8902 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8903 DEPR_ACCESS_V8
, NULL
},
8906 #undef DEPR_ACCESS_V8
8908 static const size_t deprecated_coproc_reg_count
=
8909 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8917 Rd
= inst
.operands
[2].reg
;
8920 if (inst
.instruction
== 0xee000010
8921 || inst
.instruction
== 0xfe000010)
8923 reject_bad_reg (Rd
);
8924 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
8926 constraint (Rd
== REG_SP
, BAD_SP
);
8931 if (inst
.instruction
== 0xe000010)
8932 constraint (Rd
== REG_PC
, BAD_PC
);
8935 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8937 const struct deprecated_coproc_regs_s
*r
=
8938 deprecated_coproc_regs
+ i
;
8940 if (inst
.operands
[0].reg
== r
->cp
8941 && inst
.operands
[1].imm
== r
->opc1
8942 && inst
.operands
[3].reg
== r
->crn
8943 && inst
.operands
[4].reg
== r
->crm
8944 && inst
.operands
[5].imm
== r
->opc2
)
8946 if (! ARM_CPU_IS_ANY (cpu_variant
)
8947 && warn_on_deprecated
8948 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8949 as_tsktsk ("%s", r
->dep_msg
);
8953 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8954 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8955 inst
.instruction
|= Rd
<< 12;
8956 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8957 inst
.instruction
|= inst
.operands
[4].reg
;
8958 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8961 /* Transfer between coprocessor register and pair of ARM registers.
8962 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8967 Two XScale instructions are special cases of these:
8969 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8970 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8972 Result unpredictable if Rd or Rn is R15. */
8979 Rd
= inst
.operands
[2].reg
;
8980 Rn
= inst
.operands
[3].reg
;
8984 reject_bad_reg (Rd
);
8985 reject_bad_reg (Rn
);
8989 constraint (Rd
== REG_PC
, BAD_PC
);
8990 constraint (Rn
== REG_PC
, BAD_PC
);
8993 /* Only check the MRRC{2} variants. */
8994 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
8996 /* If Rd == Rn, error that the operation is
8997 unpredictable (example MRRC p3,#1,r1,r1,c4). */
8998 constraint (Rd
== Rn
, BAD_OVERLAP
);
9001 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9002 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
9003 inst
.instruction
|= Rd
<< 12;
9004 inst
.instruction
|= Rn
<< 16;
9005 inst
.instruction
|= inst
.operands
[4].reg
;
9011 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
9012 if (inst
.operands
[1].present
)
9014 inst
.instruction
|= CPSI_MMOD
;
9015 inst
.instruction
|= inst
.operands
[1].imm
;
9022 inst
.instruction
|= inst
.operands
[0].imm
;
9028 unsigned Rd
, Rn
, Rm
;
9030 Rd
= inst
.operands
[0].reg
;
9031 Rn
= (inst
.operands
[1].present
9032 ? inst
.operands
[1].reg
: Rd
);
9033 Rm
= inst
.operands
[2].reg
;
9035 constraint ((Rd
== REG_PC
), BAD_PC
);
9036 constraint ((Rn
== REG_PC
), BAD_PC
);
9037 constraint ((Rm
== REG_PC
), BAD_PC
);
9039 inst
.instruction
|= Rd
<< 16;
9040 inst
.instruction
|= Rn
<< 0;
9041 inst
.instruction
|= Rm
<< 8;
9047 /* There is no IT instruction in ARM mode. We
9048 process it to do the validation as if in
9049 thumb mode, just in case the code gets
9050 assembled for thumb using the unified syntax. */
9055 set_it_insn_type (IT_INSN
);
9056 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
9057 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* Locate the lowest set bit by hand instead of via ffs ().  Besides
     dropping the non-ISO dependency, this avoids the undefined negative
     shift the old code performed for an empty list: ffs (0) == 0 gave
     i == -1 and the comparison then evaluated 1 << -1.  */
  for (i = 0; i < 16; i++)
    if (range & (1 << i))
      break;

  /* Valid only when exactly one bit within r0..r15 is set; an empty
     list, a bit above r15, or multiple bits all yield -1.  */
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
9071 encode_ldmstm(int from_push_pop_mnem
)
9073 int base_reg
= inst
.operands
[0].reg
;
9074 int range
= inst
.operands
[1].imm
;
9077 inst
.instruction
|= base_reg
<< 16;
9078 inst
.instruction
|= range
;
9080 if (inst
.operands
[1].writeback
)
9081 inst
.instruction
|= LDM_TYPE_2_OR_3
;
9083 if (inst
.operands
[0].writeback
)
9085 inst
.instruction
|= WRITE_BACK
;
9086 /* Check for unpredictable uses of writeback. */
9087 if (inst
.instruction
& LOAD_BIT
)
9089 /* Not allowed in LDM type 2. */
9090 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
9091 && ((range
& (1 << REG_PC
)) == 0))
9092 as_warn (_("writeback of base register is UNPREDICTABLE"));
9093 /* Only allowed if base reg not in list for other types. */
9094 else if (range
& (1 << base_reg
))
9095 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
9099 /* Not allowed for type 2. */
9100 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
9101 as_warn (_("writeback of base register is UNPREDICTABLE"));
9102 /* Only allowed if base reg not in list, or first in list. */
9103 else if ((range
& (1 << base_reg
))
9104 && (range
& ((1 << base_reg
) - 1)))
9105 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
9109 /* If PUSH/POP has only one register, then use the A2 encoding. */
9110 one_reg
= only_one_reg_in_list (range
);
9111 if (from_push_pop_mnem
&& one_reg
>= 0)
9113 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
9115 if (is_push
&& one_reg
== 13 /* SP */)
9116 /* PR 22483: The A2 encoding cannot be used when
9117 pushing the stack pointer as this is UNPREDICTABLE. */
9120 inst
.instruction
&= A_COND_MASK
;
9121 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
9122 inst
.instruction
|= one_reg
<< 12;
9129 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
9132 /* ARMv5TE load-consecutive (argument parse)
9141 constraint (inst
.operands
[0].reg
% 2 != 0,
9142 _("first transfer register must be even"));
9143 constraint (inst
.operands
[1].present
9144 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9145 _("can only transfer two consecutive registers"));
9146 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9147 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
9149 if (!inst
.operands
[1].present
)
9150 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9152 /* encode_arm_addr_mode_3 will diagnose overlap between the base
9153 register and the first register written; we have to diagnose
9154 overlap between the base and the second register written here. */
9156 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
9157 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
9158 as_warn (_("base register written back, and overlaps "
9159 "second transfer register"));
9161 if (!(inst
.instruction
& V4_STR_BIT
))
9163 /* For an index-register load, the index register must not overlap the
9164 destination (even if not write-back). */
9165 if (inst
.operands
[2].immisreg
9166 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
9167 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
9168 as_warn (_("index register overlaps transfer register"));
9170 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9171 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
9177 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9178 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9179 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9180 || inst
.operands
[1].negative
9181 /* This can arise if the programmer has written
9183 or if they have mistakenly used a register name as the last
9186 It is very difficult to distinguish between these two cases
9187 because "rX" might actually be a label. ie the register
9188 name has been occluded by a symbol of the same name. So we
9189 just generate a general 'bad addressing mode' type error
9190 message and leave it up to the programmer to discover the
9191 true cause and fix their mistake. */
9192 || (inst
.operands
[1].reg
== REG_PC
),
9195 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9196 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9197 _("offset must be zero in ARM encoding"));
9199 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
9201 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9202 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9203 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9209 constraint (inst
.operands
[0].reg
% 2 != 0,
9210 _("even register required"));
9211 constraint (inst
.operands
[1].present
9212 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9213 _("can only load two consecutive registers"));
9214 /* If op 1 were present and equal to PC, this function wouldn't
9215 have been called in the first place. */
9216 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9218 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9219 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9222 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9223 which is not a multiple of four is UNPREDICTABLE. */
9225 check_ldr_r15_aligned (void)
9227 constraint (!(inst
.operands
[1].immisreg
)
9228 && (inst
.operands
[0].reg
== REG_PC
9229 && inst
.operands
[1].reg
== REG_PC
9230 && (inst
.relocs
[0].exp
.X_add_number
& 0x3)),
9231 _("ldr to register 15 must be 4-byte aligned"));
9237 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9238 if (!inst
.operands
[1].isreg
)
9239 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
9241 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
9242 check_ldr_r15_aligned ();
9248 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9250 if (inst
.operands
[1].preind
)
9252 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9253 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9254 _("this instruction requires a post-indexed address"));
9256 inst
.operands
[1].preind
= 0;
9257 inst
.operands
[1].postind
= 1;
9258 inst
.operands
[1].writeback
= 1;
9260 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9261 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
9264 /* Halfword and signed-byte load/store operations. */
9269 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9270 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9271 if (!inst
.operands
[1].isreg
)
9272 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
9274 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
9280 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9282 if (inst
.operands
[1].preind
)
9284 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9285 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9286 _("this instruction requires a post-indexed address"));
9288 inst
.operands
[1].preind
= 0;
9289 inst
.operands
[1].postind
= 1;
9290 inst
.operands
[1].writeback
= 1;
9292 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9293 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9296 /* Co-processor register load/store.
9297 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9301 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9302 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9303 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9309 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9310 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9311 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9312 && !(inst
.instruction
& 0x00400000))
9313 as_tsktsk (_("Rd and Rm should be different in mla"));
9315 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9316 inst
.instruction
|= inst
.operands
[1].reg
;
9317 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9318 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9324 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9325 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9327 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9328 encode_arm_shifter_operand (1);
9331 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9338 top
= (inst
.instruction
& 0x00400000) != 0;
9339 constraint (top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
,
9340 _(":lower16: not allowed in this instruction"));
9341 constraint (!top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
,
9342 _(":upper16: not allowed in this instruction"));
9343 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9344 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
9346 imm
= inst
.relocs
[0].exp
.X_add_number
;
9347 /* The value is in two pieces: 0:11, 16:19. */
9348 inst
.instruction
|= (imm
& 0x00000fff);
9349 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9354 do_vfp_nsyn_mrs (void)
9356 if (inst
.operands
[0].isvec
)
9358 if (inst
.operands
[1].reg
!= 1)
9359 first_error (_("operand 1 must be FPSCR"));
9360 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9361 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9362 do_vfp_nsyn_opcode ("fmstat");
9364 else if (inst
.operands
[1].isvec
)
9365 do_vfp_nsyn_opcode ("fmrx");
9373 do_vfp_nsyn_msr (void)
9375 if (inst
.operands
[0].isvec
)
9376 do_vfp_nsyn_opcode ("fmxr");
9386 unsigned Rt
= inst
.operands
[0].reg
;
9388 if (thumb_mode
&& Rt
== REG_SP
)
9390 inst
.error
= BAD_SP
;
9394 /* MVFR2 is only valid at ARMv8-A. */
9395 if (inst
.operands
[1].reg
== 5)
9396 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9399 /* APSR_ sets isvec. All other refs to PC are illegal. */
9400 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9402 inst
.error
= BAD_PC
;
9406 /* If we get through parsing the register name, we just insert the number
9407 generated into the instruction without further validation. */
9408 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9409 inst
.instruction
|= (Rt
<< 12);
9415 unsigned Rt
= inst
.operands
[1].reg
;
9418 reject_bad_reg (Rt
);
9419 else if (Rt
== REG_PC
)
9421 inst
.error
= BAD_PC
;
9425 /* MVFR2 is only valid for ARMv8-A. */
9426 if (inst
.operands
[0].reg
== 5)
9427 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9430 /* If we get through parsing the register name, we just insert the number
9431 generated into the instruction without further validation. */
9432 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9433 inst
.instruction
|= (Rt
<< 12);
9441 if (do_vfp_nsyn_mrs () == SUCCESS
)
9444 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9445 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9447 if (inst
.operands
[1].isreg
)
9449 br
= inst
.operands
[1].reg
;
9450 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf0000))
9451 as_bad (_("bad register for mrs"));
9455 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9456 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9458 _("'APSR', 'CPSR' or 'SPSR' expected"));
9459 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9462 inst
.instruction
|= br
;
9465 /* Two possible forms:
9466 "{C|S}PSR_<field>, Rm",
9467 "{C|S}PSR_f, #expression". */
9472 if (do_vfp_nsyn_msr () == SUCCESS
)
9475 inst
.instruction
|= inst
.operands
[0].imm
;
9476 if (inst
.operands
[1].isreg
)
9477 inst
.instruction
|= inst
.operands
[1].reg
;
9480 inst
.instruction
|= INST_IMMEDIATE
;
9481 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
9482 inst
.relocs
[0].pc_rel
= 0;
9489 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9491 if (!inst
.operands
[2].present
)
9492 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9493 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9494 inst
.instruction
|= inst
.operands
[1].reg
;
9495 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9497 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9498 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9499 as_tsktsk (_("Rd and Rm should be different in mul"));
9502 /* Long Multiply Parser
9503 UMULL RdLo, RdHi, Rm, Rs
9504 SMULL RdLo, RdHi, Rm, Rs
9505 UMLAL RdLo, RdHi, Rm, Rs
9506 SMLAL RdLo, RdHi, Rm, Rs. */
9511 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9512 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9513 inst
.instruction
|= inst
.operands
[2].reg
;
9514 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9516 /* rdhi and rdlo must be different. */
9517 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9518 as_tsktsk (_("rdhi and rdlo must be different"));
9520 /* rdhi, rdlo and rm must all be different before armv6. */
9521 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9522 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9523 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9524 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9530 if (inst
.operands
[0].present
9531 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9533 /* Architectural NOP hints are CPSR sets with no bits selected. */
9534 inst
.instruction
&= 0xf0000000;
9535 inst
.instruction
|= 0x0320f000;
9536 if (inst
.operands
[0].present
)
9537 inst
.instruction
|= inst
.operands
[0].imm
;
9541 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9542 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9543 Condition defaults to COND_ALWAYS.
9544 Error if Rd, Rn or Rm are R15. */
9549 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9550 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9551 inst
.instruction
|= inst
.operands
[2].reg
;
9552 if (inst
.operands
[3].present
)
9553 encode_arm_shift (3);
9556 /* ARM V6 PKHTB (Argument Parse). */
9561 if (!inst
.operands
[3].present
)
9563 /* If the shift specifier is omitted, turn the instruction
9564 into pkhbt rd, rm, rn. */
9565 inst
.instruction
&= 0xfff00010;
9566 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9567 inst
.instruction
|= inst
.operands
[1].reg
;
9568 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9572 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9573 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9574 inst
.instruction
|= inst
.operands
[2].reg
;
9575 encode_arm_shift (3);
9579 /* ARMv5TE: Preload-Cache
9580 MP Extensions: Preload for write
9584 Syntactically, like LDR with B=1, W=0, L=1. */
9589 constraint (!inst
.operands
[0].isreg
,
9590 _("'[' expected after PLD mnemonic"));
9591 constraint (inst
.operands
[0].postind
,
9592 _("post-indexed expression used in preload instruction"));
9593 constraint (inst
.operands
[0].writeback
,
9594 _("writeback used in preload instruction"));
9595 constraint (!inst
.operands
[0].preind
,
9596 _("unindexed addressing used in preload instruction"));
9597 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9600 /* ARMv7: PLI <addr_mode> */
9604 constraint (!inst
.operands
[0].isreg
,
9605 _("'[' expected after PLI mnemonic"));
9606 constraint (inst
.operands
[0].postind
,
9607 _("post-indexed expression used in preload instruction"));
9608 constraint (inst
.operands
[0].writeback
,
9609 _("writeback used in preload instruction"));
9610 constraint (!inst
.operands
[0].preind
,
9611 _("unindexed addressing used in preload instruction"));
9612 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9613 inst
.instruction
&= ~PRE_INDEX
;
9619 constraint (inst
.operands
[0].writeback
,
9620 _("push/pop do not support {reglist}^"));
9621 inst
.operands
[1] = inst
.operands
[0];
9622 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9623 inst
.operands
[0].isreg
= 1;
9624 inst
.operands
[0].writeback
= 1;
9625 inst
.operands
[0].reg
= REG_SP
;
9626 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9629 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9630 word at the specified address and the following word
9632 Unconditionally executed.
9633 Error if Rn is R15. */
9638 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9639 if (inst
.operands
[0].writeback
)
9640 inst
.instruction
|= WRITE_BACK
;
9643 /* ARM V6 ssat (argument parse). */
9648 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9649 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9650 inst
.instruction
|= inst
.operands
[2].reg
;
9652 if (inst
.operands
[3].present
)
9653 encode_arm_shift (3);
9656 /* ARM V6 usat (argument parse). */
9661 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9662 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9663 inst
.instruction
|= inst
.operands
[2].reg
;
9665 if (inst
.operands
[3].present
)
9666 encode_arm_shift (3);
9669 /* ARM V6 ssat16 (argument parse). */
9674 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9675 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9676 inst
.instruction
|= inst
.operands
[2].reg
;
9682 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9683 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9684 inst
.instruction
|= inst
.operands
[2].reg
;
9687 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9688 preserving the other bits.
9690 setend <endian_specifier>, where <endian_specifier> is either
9696 if (warn_on_deprecated
9697 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9698 as_tsktsk (_("setend use is deprecated for ARMv8"));
9700 if (inst
.operands
[0].imm
)
9701 inst
.instruction
|= 0x200;
9707 unsigned int Rm
= (inst
.operands
[1].present
9708 ? inst
.operands
[1].reg
9709 : inst
.operands
[0].reg
);
9711 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9712 inst
.instruction
|= Rm
;
9713 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9715 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9716 inst
.instruction
|= SHIFT_BY_REG
;
9717 /* PR 12854: Error on extraneous shifts. */
9718 constraint (inst
.operands
[2].shifted
,
9719 _("extraneous shift as part of operand to shift insn"));
9722 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
9728 inst
.relocs
[0].type
= BFD_RELOC_ARM_SMC
;
9729 inst
.relocs
[0].pc_rel
= 0;
9735 inst
.relocs
[0].type
= BFD_RELOC_ARM_HVC
;
9736 inst
.relocs
[0].pc_rel
= 0;
9742 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
9743 inst
.relocs
[0].pc_rel
= 0;
9749 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9750 _("selected processor does not support SETPAN instruction"));
9752 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9758 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9759 _("selected processor does not support SETPAN instruction"));
9761 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9764 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9765 SMLAxy{cond} Rd,Rm,Rs,Rn
9766 SMLAWy{cond} Rd,Rm,Rs,Rn
9767 Error if any register is R15. */
9772 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9773 inst
.instruction
|= inst
.operands
[1].reg
;
9774 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9775 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9778 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9779 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9780 Error if any register is R15.
9781 Warning if Rdlo == Rdhi. */
9786 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9787 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9788 inst
.instruction
|= inst
.operands
[2].reg
;
9789 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9791 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9792 as_tsktsk (_("rdhi and rdlo must be different"));
9795 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9796 SMULxy{cond} Rd,Rm,Rs
9797 Error if any register is R15. */
9802 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9803 inst
.instruction
|= inst
.operands
[1].reg
;
9804 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9807 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9808 the same for both ARM and Thumb-2. */
9815 if (inst
.operands
[0].present
)
9817 reg
= inst
.operands
[0].reg
;
9818 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9823 inst
.instruction
|= reg
<< 16;
9824 inst
.instruction
|= inst
.operands
[1].imm
;
9825 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9826 inst
.instruction
|= WRITE_BACK
;
9829 /* ARM V6 strex (argument parse). */
9834 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9835 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9836 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9837 || inst
.operands
[2].negative
9838 /* See comment in do_ldrex(). */
9839 || (inst
.operands
[2].reg
== REG_PC
),
9842 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9843 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9845 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9846 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9847 _("offset must be zero in ARM encoding"));
9849 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9850 inst
.instruction
|= inst
.operands
[1].reg
;
9851 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9852 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9858 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9859 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9860 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9861 || inst
.operands
[2].negative
,
9864 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9865 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9873 constraint (inst
.operands
[1].reg
% 2 != 0,
9874 _("even register required"));
9875 constraint (inst
.operands
[2].present
9876 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9877 _("can only store two consecutive registers"));
9878 /* If op 2 were present and equal to PC, this function wouldn't
9879 have been called in the first place. */
9880 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9882 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9883 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9884 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9887 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9888 inst
.instruction
|= inst
.operands
[1].reg
;
9889 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9896 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9897 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9905 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9906 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9911 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9912 extends it to 32-bits, and adds the result to a value in another
9913 register. You can specify a rotation by 0, 8, 16, or 24 bits
9914 before extracting the 16-bit value.
9915 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9916 Condition defaults to COND_ALWAYS.
9917 Error if any register uses R15. */
9922 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9923 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9924 inst
.instruction
|= inst
.operands
[2].reg
;
9925 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9930 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9931 Condition defaults to COND_ALWAYS.
9932 Error if any register uses R15. */
9937 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9938 inst
.instruction
|= inst
.operands
[1].reg
;
9939 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9942 /* VFP instructions. In a logical order: SP variant first, monad
9943 before dyad, arithmetic then move then load/store. */
9946 do_vfp_sp_monadic (void)
9948 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9949 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9953 do_vfp_sp_dyadic (void)
9955 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9956 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9957 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9961 do_vfp_sp_compare_z (void)
9963 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9967 do_vfp_dp_sp_cvt (void)
9969 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9970 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9974 do_vfp_sp_dp_cvt (void)
9976 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9977 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9981 do_vfp_reg_from_sp (void)
9983 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9984 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9988 do_vfp_reg2_from_sp2 (void)
9990 constraint (inst
.operands
[2].imm
!= 2,
9991 _("only two consecutive VFP SP registers allowed here"));
9992 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9993 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9994 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9998 do_vfp_sp_from_reg (void)
10000 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
10001 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10005 do_vfp_sp2_from_reg2 (void)
10007 constraint (inst
.operands
[0].imm
!= 2,
10008 _("only two consecutive VFP SP registers allowed here"));
10009 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
10010 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10011 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10015 do_vfp_sp_ldst (void)
10017 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10018 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10022 do_vfp_dp_ldst (void)
10024 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10025 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10030 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
10032 if (inst
.operands
[0].writeback
)
10033 inst
.instruction
|= WRITE_BACK
;
10035 constraint (ldstm_type
!= VFP_LDSTMIA
,
10036 _("this addressing mode requires base-register writeback"));
10037 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10038 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
10039 inst
.instruction
|= inst
.operands
[1].imm
;
10043 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
10047 if (inst
.operands
[0].writeback
)
10048 inst
.instruction
|= WRITE_BACK
;
10050 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
10051 _("this addressing mode requires base-register writeback"));
10053 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10054 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10056 count
= inst
.operands
[1].imm
<< 1;
10057 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
10060 inst
.instruction
|= count
;
10064 do_vfp_sp_ldstmia (void)
10066 vfp_sp_ldstm (VFP_LDSTMIA
);
10070 do_vfp_sp_ldstmdb (void)
10072 vfp_sp_ldstm (VFP_LDSTMDB
);
10076 do_vfp_dp_ldstmia (void)
10078 vfp_dp_ldstm (VFP_LDSTMIA
);
10082 do_vfp_dp_ldstmdb (void)
10084 vfp_dp_ldstm (VFP_LDSTMDB
);
10088 do_vfp_xp_ldstmia (void)
10090 vfp_dp_ldstm (VFP_LDSTMIAX
);
10094 do_vfp_xp_ldstmdb (void)
10096 vfp_dp_ldstm (VFP_LDSTMDBX
);
10100 do_vfp_dp_rd_rm (void)
10102 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10103 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
10107 do_vfp_dp_rn_rd (void)
10109 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
10110 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10114 do_vfp_dp_rd_rn (void)
10116 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10117 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10121 do_vfp_dp_rd_rn_rm (void)
10123 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10124 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10125 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
10129 do_vfp_dp_rd (void)
10131 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10135 do_vfp_dp_rm_rd_rn (void)
10137 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
10138 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10139 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
10142 /* VFPv3 instructions. */
10144 do_vfp_sp_const (void)
10146 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10147 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10148 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10152 do_vfp_dp_const (void)
10154 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10155 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10156 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10160 vfp_conv (int srcsize
)
10162 int immbits
= srcsize
- inst
.operands
[1].imm
;
10164 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
10166 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
10167 i.e. immbits must be in range 0 - 16. */
10168 inst
.error
= _("immediate value out of range, expected range [0, 16]");
10171 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
10173 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
10174 i.e. immbits must be in range 0 - 31. */
10175 inst
.error
= _("immediate value out of range, expected range [1, 32]");
10179 inst
.instruction
|= (immbits
& 1) << 5;
10180 inst
.instruction
|= (immbits
>> 1);
10184 do_vfp_sp_conv_16 (void)
10186 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10191 do_vfp_dp_conv_16 (void)
10193 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10198 do_vfp_sp_conv_32 (void)
10200 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10205 do_vfp_dp_conv_32 (void)
10207 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10211 /* FPA instructions. Also in a logical order. */
10216 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10217 inst
.instruction
|= inst
.operands
[1].reg
;
10221 do_fpa_ldmstm (void)
10223 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10224 switch (inst
.operands
[1].imm
)
10226 case 1: inst
.instruction
|= CP_T_X
; break;
10227 case 2: inst
.instruction
|= CP_T_Y
; break;
10228 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
10233 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
10235 /* The instruction specified "ea" or "fd", so we can only accept
10236 [Rn]{!}. The instruction does not really support stacking or
10237 unstacking, so we have to emulate these by setting appropriate
10238 bits and offsets. */
10239 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
10240 || inst
.relocs
[0].exp
.X_add_number
!= 0,
10241 _("this instruction does not support indexing"));
10243 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
10244 inst
.relocs
[0].exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
10246 if (!(inst
.instruction
& INDEX_UP
))
10247 inst
.relocs
[0].exp
.X_add_number
= -inst
.relocs
[0].exp
.X_add_number
;
10249 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
10251 inst
.operands
[2].preind
= 0;
10252 inst
.operands
[2].postind
= 1;
10256 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
10259 /* iWMMXt instructions: strictly in alphabetical order. */
10262 do_iwmmxt_tandorc (void)
10264 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
10268 do_iwmmxt_textrc (void)
10270 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10271 inst
.instruction
|= inst
.operands
[1].imm
;
10275 do_iwmmxt_textrm (void)
10277 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10278 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10279 inst
.instruction
|= inst
.operands
[2].imm
;
10283 do_iwmmxt_tinsr (void)
10285 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10286 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10287 inst
.instruction
|= inst
.operands
[2].imm
;
10291 do_iwmmxt_tmia (void)
10293 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10294 inst
.instruction
|= inst
.operands
[1].reg
;
10295 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10299 do_iwmmxt_waligni (void)
10301 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10302 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10303 inst
.instruction
|= inst
.operands
[2].reg
;
10304 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
10308 do_iwmmxt_wmerge (void)
10310 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10311 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10312 inst
.instruction
|= inst
.operands
[2].reg
;
10313 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
10317 do_iwmmxt_wmov (void)
10319 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10320 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10321 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10322 inst
.instruction
|= inst
.operands
[1].reg
;
10326 do_iwmmxt_wldstbh (void)
10329 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10331 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
10333 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
10334 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
10338 do_iwmmxt_wldstw (void)
10340 /* RIWR_RIWC clears .isreg for a control register. */
10341 if (!inst
.operands
[0].isreg
)
10343 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
10344 inst
.instruction
|= 0xf0000000;
10347 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10348 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
10352 do_iwmmxt_wldstd (void)
10354 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10355 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
10356 && inst
.operands
[1].immisreg
)
10358 inst
.instruction
&= ~0x1a000ff;
10359 inst
.instruction
|= (0xfU
<< 28);
10360 if (inst
.operands
[1].preind
)
10361 inst
.instruction
|= PRE_INDEX
;
10362 if (!inst
.operands
[1].negative
)
10363 inst
.instruction
|= INDEX_UP
;
10364 if (inst
.operands
[1].writeback
)
10365 inst
.instruction
|= WRITE_BACK
;
10366 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10367 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10368 inst
.instruction
|= inst
.operands
[1].imm
;
10371 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10375 do_iwmmxt_wshufh (void)
10377 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10378 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10379 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10380 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10384 do_iwmmxt_wzero (void)
10386 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10387 inst
.instruction
|= inst
.operands
[0].reg
;
10388 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10389 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10393 do_iwmmxt_wrwrwr_or_imm5 (void)
10395 if (inst
.operands
[2].isreg
)
10398 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10399 _("immediate operand requires iWMMXt2"));
10401 if (inst
.operands
[2].imm
== 0)
10403 switch ((inst
.instruction
>> 20) & 0xf)
10409 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10410 inst
.operands
[2].imm
= 16;
10411 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10417 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10418 inst
.operands
[2].imm
= 32;
10419 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10426 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10428 wrn
= (inst
.instruction
>> 16) & 0xf;
10429 inst
.instruction
&= 0xff0fff0f;
10430 inst
.instruction
|= wrn
;
10431 /* Bail out here; the instruction is now assembled. */
10436 /* Map 32 -> 0, etc. */
10437 inst
.operands
[2].imm
&= 0x1f;
10438 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10442 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10443 operations first, then control, shift, and load/store. */
10445 /* Insns like "foo X,Y,Z". */
10448 do_mav_triple (void)
10450 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10451 inst
.instruction
|= inst
.operands
[1].reg
;
10452 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10455 /* Insns like "foo W,X,Y,Z".
10456 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10461 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10462 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10463 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10464 inst
.instruction
|= inst
.operands
[3].reg
;
10467 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10469 do_mav_dspsc (void)
10471 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10474 /* Maverick shift immediate instructions.
10475 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10476 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10479 do_mav_shift (void)
10481 int imm
= inst
.operands
[2].imm
;
10483 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10484 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10486 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10487 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10488 Bit 4 should be 0. */
10489 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10491 inst
.instruction
|= imm
;
10494 /* XScale instructions. Also sorted arithmetic before move. */
10496 /* Xscale multiply-accumulate (argument parse)
10499 MIAxycc acc0,Rm,Rs. */
10504 inst
.instruction
|= inst
.operands
[1].reg
;
10505 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10508 /* Xscale move-accumulator-register (argument parse)
10510 MARcc acc0,RdLo,RdHi. */
10515 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10516 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10519 /* Xscale move-register-accumulator (argument parse)
10521 MRAcc RdLo,RdHi,acc0. */
10526 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10527 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10528 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10531 /* Encoding functions relevant only to Thumb. */
10533 /* inst.operands[i] is a shifted-register operand; encode
10534 it into inst.instruction in the format used by Thumb32. */
10537 encode_thumb32_shifted_operand (int i
)
10539 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10540 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10542 constraint (inst
.operands
[i
].immisreg
,
10543 _("shift by register not allowed in thumb mode"));
10544 inst
.instruction
|= inst
.operands
[i
].reg
;
10545 if (shift
== SHIFT_RRX
)
10546 inst
.instruction
|= SHIFT_ROR
<< 4;
10549 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10550 _("expression too complex"));
10552 constraint (value
> 32
10553 || (value
== 32 && (shift
== SHIFT_LSL
10554 || shift
== SHIFT_ROR
)),
10555 _("shift expression is too large"));
10559 else if (value
== 32)
10562 inst
.instruction
|= shift
<< 4;
10563 inst
.instruction
|= (value
& 0x1c) << 10;
10564 inst
.instruction
|= (value
& 0x03) << 6;
10569 /* inst.operands[i] was set up by parse_address. Encode it into a
10570 Thumb32 format load or store instruction. Reject forms that cannot
10571 be used with such instructions. If is_t is true, reject forms that
10572 cannot be used with a T instruction; if is_d is true, reject forms
10573 that cannot be used with a D instruction. If it is a store insn,
10574 reject PC in Rn. */
10577 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10579 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10581 constraint (!inst
.operands
[i
].isreg
,
10582 _("Instruction does not support =N addresses"));
10584 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10585 if (inst
.operands
[i
].immisreg
)
10587 constraint (is_pc
, BAD_PC_ADDRESSING
);
10588 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10589 constraint (inst
.operands
[i
].negative
,
10590 _("Thumb does not support negative register indexing"));
10591 constraint (inst
.operands
[i
].postind
,
10592 _("Thumb does not support register post-indexing"));
10593 constraint (inst
.operands
[i
].writeback
,
10594 _("Thumb does not support register indexing with writeback"));
10595 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10596 _("Thumb supports only LSL in shifted register indexing"));
10598 inst
.instruction
|= inst
.operands
[i
].imm
;
10599 if (inst
.operands
[i
].shifted
)
10601 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10602 _("expression too complex"));
10603 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10604 || inst
.relocs
[0].exp
.X_add_number
> 3,
10605 _("shift out of range"));
10606 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10608 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10610 else if (inst
.operands
[i
].preind
)
10612 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10613 constraint (is_t
&& inst
.operands
[i
].writeback
,
10614 _("cannot use writeback with this instruction"));
10615 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10616 BAD_PC_ADDRESSING
);
10620 inst
.instruction
|= 0x01000000;
10621 if (inst
.operands
[i
].writeback
)
10622 inst
.instruction
|= 0x00200000;
10626 inst
.instruction
|= 0x00000c00;
10627 if (inst
.operands
[i
].writeback
)
10628 inst
.instruction
|= 0x00000100;
10630 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10632 else if (inst
.operands
[i
].postind
)
10634 gas_assert (inst
.operands
[i
].writeback
);
10635 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10636 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10639 inst
.instruction
|= 0x00200000;
10641 inst
.instruction
|= 0x00000900;
10642 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10644 else /* unindexed - only for coprocessor */
10645 inst
.error
= _("instruction does not accept unindexed addressing");
10648 /* Table of Thumb instructions which exist in both 16- and 32-bit
10649 encodings (the latter only in post-V6T2 cores). The index is the
10650 value used in the insns table below. When there is more than one
10651 possible 16-bit encoding for the instruction, this table always
10653 Also contains several pseudo-instructions used during relaxation. */
10654 #define T16_32_TAB \
10655 X(_adc, 4140, eb400000), \
10656 X(_adcs, 4140, eb500000), \
10657 X(_add, 1c00, eb000000), \
10658 X(_adds, 1c00, eb100000), \
10659 X(_addi, 0000, f1000000), \
10660 X(_addis, 0000, f1100000), \
10661 X(_add_pc,000f, f20f0000), \
10662 X(_add_sp,000d, f10d0000), \
10663 X(_adr, 000f, f20f0000), \
10664 X(_and, 4000, ea000000), \
10665 X(_ands, 4000, ea100000), \
10666 X(_asr, 1000, fa40f000), \
10667 X(_asrs, 1000, fa50f000), \
10668 X(_b, e000, f000b000), \
10669 X(_bcond, d000, f0008000), \
10670 X(_bf, 0000, f040e001), \
10671 X(_bfcsel,0000, f000e001), \
10672 X(_bfx, 0000, f060e001), \
10673 X(_bfl, 0000, f000c001), \
10674 X(_bflx, 0000, f070e001), \
10675 X(_bic, 4380, ea200000), \
10676 X(_bics, 4380, ea300000), \
10677 X(_cmn, 42c0, eb100f00), \
10678 X(_cmp, 2800, ebb00f00), \
10679 X(_cpsie, b660, f3af8400), \
10680 X(_cpsid, b670, f3af8600), \
10681 X(_cpy, 4600, ea4f0000), \
10682 X(_dec_sp,80dd, f1ad0d00), \
10683 X(_dls, 0000, f040e001), \
10684 X(_eor, 4040, ea800000), \
10685 X(_eors, 4040, ea900000), \
10686 X(_inc_sp,00dd, f10d0d00), \
10687 X(_ldmia, c800, e8900000), \
10688 X(_ldr, 6800, f8500000), \
10689 X(_ldrb, 7800, f8100000), \
10690 X(_ldrh, 8800, f8300000), \
10691 X(_ldrsb, 5600, f9100000), \
10692 X(_ldrsh, 5e00, f9300000), \
10693 X(_ldr_pc,4800, f85f0000), \
10694 X(_ldr_pc2,4800, f85f0000), \
10695 X(_ldr_sp,9800, f85d0000), \
10696 X(_le, 0000, f00fc001), \
10697 X(_lsl, 0000, fa00f000), \
10698 X(_lsls, 0000, fa10f000), \
10699 X(_lsr, 0800, fa20f000), \
10700 X(_lsrs, 0800, fa30f000), \
10701 X(_mov, 2000, ea4f0000), \
10702 X(_movs, 2000, ea5f0000), \
10703 X(_mul, 4340, fb00f000), \
10704 X(_muls, 4340, ffffffff), /* no 32b muls */ \
10705 X(_mvn, 43c0, ea6f0000), \
10706 X(_mvns, 43c0, ea7f0000), \
10707 X(_neg, 4240, f1c00000), /* rsb #0 */ \
10708 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
10709 X(_orr, 4300, ea400000), \
10710 X(_orrs, 4300, ea500000), \
10711 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
10712 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
10713 X(_rev, ba00, fa90f080), \
10714 X(_rev16, ba40, fa90f090), \
10715 X(_revsh, bac0, fa90f0b0), \
10716 X(_ror, 41c0, fa60f000), \
10717 X(_rors, 41c0, fa70f000), \
10718 X(_sbc, 4180, eb600000), \
10719 X(_sbcs, 4180, eb700000), \
10720 X(_stmia, c000, e8800000), \
10721 X(_str, 6000, f8400000), \
10722 X(_strb, 7000, f8000000), \
10723 X(_strh, 8000, f8200000), \
10724 X(_str_sp,9000, f84d0000), \
10725 X(_sub, 1e00, eba00000), \
10726 X(_subs, 1e00, ebb00000), \
10727 X(_subi, 8000, f1a00000), \
10728 X(_subis, 8000, f1b00000), \
10729 X(_sxtb, b240, fa4ff080), \
10730 X(_sxth, b200, fa0ff080), \
10731 X(_tst, 4200, ea100f00), \
10732 X(_uxtb, b2c0, fa5ff080), \
10733 X(_uxth, b280, fa1ff080), \
10734 X(_nop, bf00, f3af8000), \
10735 X(_yield, bf10, f3af8001), \
10736 X(_wfe, bf20, f3af8002), \
10737 X(_wfi, bf30, f3af8003), \
10738 X(_wls, 0000, f040c001), \
10739 X(_sev, bf40, f3af8004), \
10740 X(_sevl, bf50, f3af8005), \
10741 X(_udf, de00, f7f0a000)
10743 /* To catch errors in encoding functions, the codes are all offset by
10744 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
10745 as 16-bit instructions. */
10746 #define X(a,b,c) T_MNEM##a
10747 enum t16_32_codes
{ T16_32_OFFSET
= 0xF7FF, T16_32_TAB
};
10750 #define X(a,b,c) 0x##b
10751 static const unsigned short thumb_op16
[] = { T16_32_TAB
};
10752 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
10755 #define X(a,b,c) 0x##c
10756 static const unsigned int thumb_op32
[] = { T16_32_TAB
};
10757 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
10758 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
10762 /* Thumb instruction encoders, in alphabetical order. */
10764 /* ADDW or SUBW. */
10767 do_t_add_sub_w (void)
10771 Rd
= inst
.operands
[0].reg
;
10772 Rn
= inst
.operands
[1].reg
;
10774 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10775 is the SP-{plus,minus}-immediate form of the instruction. */
10777 constraint (Rd
== REG_PC
, BAD_PC
);
10779 reject_bad_reg (Rd
);
10781 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10782 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
10785 /* Parse an add or subtract instruction. We get here with inst.instruction
10786 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10789 do_t_add_sub (void)
10793 Rd
= inst
.operands
[0].reg
;
10794 Rs
= (inst
.operands
[1].present
10795 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10796 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10799 set_it_insn_type_last ();
10801 if (unified_syntax
)
10804 bfd_boolean narrow
;
10807 flags
= (inst
.instruction
== T_MNEM_adds
10808 || inst
.instruction
== T_MNEM_subs
);
10810 narrow
= !in_it_block ();
10812 narrow
= in_it_block ();
10813 if (!inst
.operands
[2].isreg
)
10817 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10818 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10820 add
= (inst
.instruction
== T_MNEM_add
10821 || inst
.instruction
== T_MNEM_adds
);
10823 if (inst
.size_req
!= 4)
10825 /* Attempt to use a narrow opcode, with relaxation if
10827 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10828 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10829 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10830 opcode
= T_MNEM_add_sp
;
10831 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10832 opcode
= T_MNEM_add_pc
;
10833 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10836 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10838 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10842 inst
.instruction
= THUMB_OP16(opcode
);
10843 inst
.instruction
|= (Rd
<< 4) | Rs
;
10844 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10845 || (inst
.relocs
[0].type
10846 > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
))
10848 if (inst
.size_req
== 2)
10849 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
10851 inst
.relax
= opcode
;
10855 constraint (inst
.size_req
== 2, BAD_HIREG
);
10857 if (inst
.size_req
== 4
10858 || (inst
.size_req
!= 2 && !opcode
))
10860 constraint ((inst
.relocs
[0].type
10861 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
10862 && (inst
.relocs
[0].type
10863 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
10864 THUMB1_RELOC_ONLY
);
10867 constraint (add
, BAD_PC
);
10868 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10869 _("only SUBS PC, LR, #const allowed"));
10870 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10871 _("expression too complex"));
10872 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10873 || inst
.relocs
[0].exp
.X_add_number
> 0xff,
10874 _("immediate value out of range"));
10875 inst
.instruction
= T2_SUBS_PC_LR
10876 | inst
.relocs
[0].exp
.X_add_number
;
10877 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10880 else if (Rs
== REG_PC
)
10882 /* Always use addw/subw. */
10883 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10884 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
10888 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10889 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10892 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10894 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10896 inst
.instruction
|= Rd
<< 8;
10897 inst
.instruction
|= Rs
<< 16;
10902 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10903 unsigned int shift
= inst
.operands
[2].shift_kind
;
10905 Rn
= inst
.operands
[2].reg
;
10906 /* See if we can do this with a 16-bit instruction. */
10907 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10909 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10914 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10915 || inst
.instruction
== T_MNEM_add
)
10917 : T_OPCODE_SUB_R3
);
10918 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10922 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10924 /* Thumb-1 cores (except v6-M) require at least one high
10925 register in a narrow non flag setting add. */
10926 if (Rd
> 7 || Rn
> 7
10927 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10928 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10935 inst
.instruction
= T_OPCODE_ADD_HI
;
10936 inst
.instruction
|= (Rd
& 8) << 4;
10937 inst
.instruction
|= (Rd
& 7);
10938 inst
.instruction
|= Rn
<< 3;
10944 constraint (Rd
== REG_PC
, BAD_PC
);
10945 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10946 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10947 constraint (Rs
== REG_PC
, BAD_PC
);
10948 reject_bad_reg (Rn
);
10950 /* If we get here, it can't be done in 16 bits. */
10951 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10952 _("shift must be constant"));
10953 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10954 inst
.instruction
|= Rd
<< 8;
10955 inst
.instruction
|= Rs
<< 16;
10956 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10957 _("shift value over 3 not allowed in thumb mode"));
10958 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10959 _("only LSL shift allowed in thumb mode"));
10960 encode_thumb32_shifted_operand (2);
10965 constraint (inst
.instruction
== T_MNEM_adds
10966 || inst
.instruction
== T_MNEM_subs
,
10969 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10971 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10972 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10975 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10976 ? 0x0000 : 0x8000);
10977 inst
.instruction
|= (Rd
<< 4) | Rs
;
10978 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
10982 Rn
= inst
.operands
[2].reg
;
10983 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10985 /* We now have Rd, Rs, and Rn set to registers. */
10986 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10988 /* Can't do this for SUB. */
10989 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10990 inst
.instruction
= T_OPCODE_ADD_HI
;
10991 inst
.instruction
|= (Rd
& 8) << 4;
10992 inst
.instruction
|= (Rd
& 7);
10994 inst
.instruction
|= Rn
<< 3;
10996 inst
.instruction
|= Rs
<< 3;
10998 constraint (1, _("dest must overlap one source register"));
11002 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11003 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
11004 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11014 Rd
= inst
.operands
[0].reg
;
11015 reject_bad_reg (Rd
);
11017 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
11019 /* Defer to section relaxation. */
11020 inst
.relax
= inst
.instruction
;
11021 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11022 inst
.instruction
|= Rd
<< 4;
11024 else if (unified_syntax
&& inst
.size_req
!= 2)
11026 /* Generate a 32-bit opcode. */
11027 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11028 inst
.instruction
|= Rd
<< 8;
11029 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_PC12
;
11030 inst
.relocs
[0].pc_rel
= 1;
11034 /* Generate a 16-bit opcode. */
11035 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11036 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11037 inst
.relocs
[0].exp
.X_add_number
-= 4; /* PC relative adjust. */
11038 inst
.relocs
[0].pc_rel
= 1;
11039 inst
.instruction
|= Rd
<< 4;
11042 if (inst
.relocs
[0].exp
.X_op
== O_symbol
11043 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11044 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11045 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11046 inst
.relocs
[0].exp
.X_add_number
+= 1;
11049 /* Arithmetic instructions for which there is just one 16-bit
11050 instruction encoding, and it allows only two low registers.
11051 For maximal compatibility with ARM syntax, we allow three register
11052 operands even when Thumb-32 instructions are not available, as long
11053 as the first two are identical. For instance, both "sbc r0,r1" and
11054 "sbc r0,r0,r1" are allowed. */
11060 Rd
= inst
.operands
[0].reg
;
11061 Rs
= (inst
.operands
[1].present
11062 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11063 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11064 Rn
= inst
.operands
[2].reg
;
11066 reject_bad_reg (Rd
);
11067 reject_bad_reg (Rs
);
11068 if (inst
.operands
[2].isreg
)
11069 reject_bad_reg (Rn
);
11071 if (unified_syntax
)
11073 if (!inst
.operands
[2].isreg
)
11075 /* For an immediate, we always generate a 32-bit opcode;
11076 section relaxation will shrink it later if possible. */
11077 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11078 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11079 inst
.instruction
|= Rd
<< 8;
11080 inst
.instruction
|= Rs
<< 16;
11081 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11085 bfd_boolean narrow
;
11087 /* See if we can do this with a 16-bit instruction. */
11088 if (THUMB_SETS_FLAGS (inst
.instruction
))
11089 narrow
= !in_it_block ();
11091 narrow
= in_it_block ();
11093 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11095 if (inst
.operands
[2].shifted
)
11097 if (inst
.size_req
== 4)
11103 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11104 inst
.instruction
|= Rd
;
11105 inst
.instruction
|= Rn
<< 3;
11109 /* If we get here, it can't be done in 16 bits. */
11110 constraint (inst
.operands
[2].shifted
11111 && inst
.operands
[2].immisreg
,
11112 _("shift must be constant"));
11113 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11114 inst
.instruction
|= Rd
<< 8;
11115 inst
.instruction
|= Rs
<< 16;
11116 encode_thumb32_shifted_operand (2);
11121 /* On its face this is a lie - the instruction does set the
11122 flags. However, the only supported mnemonic in this mode
11123 says it doesn't. */
11124 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11126 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11127 _("unshifted register required"));
11128 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11129 constraint (Rd
!= Rs
,
11130 _("dest and source1 must be the same register"));
11132 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11133 inst
.instruction
|= Rd
;
11134 inst
.instruction
|= Rn
<< 3;
11138 /* Similarly, but for instructions where the arithmetic operation is
11139 commutative, so we can allow either of them to be different from
11140 the destination operand in a 16-bit instruction. For instance, all
11141 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11148 Rd
= inst
.operands
[0].reg
;
11149 Rs
= (inst
.operands
[1].present
11150 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11151 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11152 Rn
= inst
.operands
[2].reg
;
11154 reject_bad_reg (Rd
);
11155 reject_bad_reg (Rs
);
11156 if (inst
.operands
[2].isreg
)
11157 reject_bad_reg (Rn
);
11159 if (unified_syntax
)
11161 if (!inst
.operands
[2].isreg
)
11163 /* For an immediate, we always generate a 32-bit opcode;
11164 section relaxation will shrink it later if possible. */
11165 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11166 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11167 inst
.instruction
|= Rd
<< 8;
11168 inst
.instruction
|= Rs
<< 16;
11169 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11173 bfd_boolean narrow
;
11175 /* See if we can do this with a 16-bit instruction. */
11176 if (THUMB_SETS_FLAGS (inst
.instruction
))
11177 narrow
= !in_it_block ();
11179 narrow
= in_it_block ();
11181 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11183 if (inst
.operands
[2].shifted
)
11185 if (inst
.size_req
== 4)
11192 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11193 inst
.instruction
|= Rd
;
11194 inst
.instruction
|= Rn
<< 3;
11199 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11200 inst
.instruction
|= Rd
;
11201 inst
.instruction
|= Rs
<< 3;
11206 /* If we get here, it can't be done in 16 bits. */
11207 constraint (inst
.operands
[2].shifted
11208 && inst
.operands
[2].immisreg
,
11209 _("shift must be constant"));
11210 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11211 inst
.instruction
|= Rd
<< 8;
11212 inst
.instruction
|= Rs
<< 16;
11213 encode_thumb32_shifted_operand (2);
11218 /* On its face this is a lie - the instruction does set the
11219 flags. However, the only supported mnemonic in this mode
11220 says it doesn't. */
11221 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11223 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11224 _("unshifted register required"));
11225 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11227 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11228 inst
.instruction
|= Rd
;
11231 inst
.instruction
|= Rn
<< 3;
11233 inst
.instruction
|= Rs
<< 3;
11235 constraint (1, _("dest must overlap one source register"));
11243 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
11244 constraint (msb
> 32, _("bit-field extends past end of register"));
11245 /* The instruction encoding stores the LSB and MSB,
11246 not the LSB and width. */
11247 Rd
= inst
.operands
[0].reg
;
11248 reject_bad_reg (Rd
);
11249 inst
.instruction
|= Rd
<< 8;
11250 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
11251 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
11252 inst
.instruction
|= msb
- 1;
11261 Rd
= inst
.operands
[0].reg
;
11262 reject_bad_reg (Rd
);
11264 /* #0 in second position is alternative syntax for bfc, which is
11265 the same instruction but with REG_PC in the Rm field. */
11266 if (!inst
.operands
[1].isreg
)
11270 Rn
= inst
.operands
[1].reg
;
11271 reject_bad_reg (Rn
);
11274 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
11275 constraint (msb
> 32, _("bit-field extends past end of register"));
11276 /* The instruction encoding stores the LSB and MSB,
11277 not the LSB and width. */
11278 inst
.instruction
|= Rd
<< 8;
11279 inst
.instruction
|= Rn
<< 16;
11280 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11281 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11282 inst
.instruction
|= msb
- 1;
11290 Rd
= inst
.operands
[0].reg
;
11291 Rn
= inst
.operands
[1].reg
;
11293 reject_bad_reg (Rd
);
11294 reject_bad_reg (Rn
);
11296 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
11297 _("bit-field extends past end of register"));
11298 inst
.instruction
|= Rd
<< 8;
11299 inst
.instruction
|= Rn
<< 16;
11300 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11301 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11302 inst
.instruction
|= inst
.operands
[3].imm
- 1;
11305 /* ARM V5 Thumb BLX (argument parse)
11306 BLX <target_addr> which is BLX(1)
11307 BLX <Rm> which is BLX(2)
11308 Unfortunately, there are two different opcodes for this mnemonic.
11309 So, the insns[].value is not used, and the code here zaps values
11310 into inst.instruction.
11312 ??? How to take advantage of the additional two bits of displacement
11313 available in Thumb32 mode? Need new relocation? */
11318 set_it_insn_type_last ();
11320 if (inst
.operands
[0].isreg
)
11322 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11323 /* We have a register, so this is BLX(2). */
11324 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11328 /* No register. This must be BLX(1). */
11329 inst
.instruction
= 0xf000e800;
11330 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11339 bfd_reloc_code_real_type reloc
;
11342 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
11344 if (in_it_block ())
11346 /* Conditional branches inside IT blocks are encoded as unconditional
11348 cond
= COND_ALWAYS
;
11353 if (cond
!= COND_ALWAYS
)
11354 opcode
= T_MNEM_bcond
;
11356 opcode
= inst
.instruction
;
11359 && (inst
.size_req
== 4
11360 || (inst
.size_req
!= 2
11361 && (inst
.operands
[0].hasreloc
11362 || inst
.relocs
[0].exp
.X_op
== O_constant
))))
11364 inst
.instruction
= THUMB_OP32(opcode
);
11365 if (cond
== COND_ALWAYS
)
11366 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11369 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11370 _("selected architecture does not support "
11371 "wide conditional branch instruction"));
11373 gas_assert (cond
!= 0xF);
11374 inst
.instruction
|= cond
<< 22;
11375 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11380 inst
.instruction
= THUMB_OP16(opcode
);
11381 if (cond
== COND_ALWAYS
)
11382 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11385 inst
.instruction
|= cond
<< 8;
11386 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11388 /* Allow section relaxation. */
11389 if (unified_syntax
&& inst
.size_req
!= 2)
11390 inst
.relax
= opcode
;
11392 inst
.relocs
[0].type
= reloc
;
11393 inst
.relocs
[0].pc_rel
= 1;
11396 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11397 between the two is the maximum immediate allowed - which is passed in
11400 do_t_bkpt_hlt1 (int range
)
11402 constraint (inst
.cond
!= COND_ALWAYS
,
11403 _("instruction is always unconditional"));
11404 if (inst
.operands
[0].present
)
11406 constraint (inst
.operands
[0].imm
> range
,
11407 _("immediate value out of range"));
11408 inst
.instruction
|= inst
.operands
[0].imm
;
11411 set_it_insn_type (NEUTRAL_IT_INSN
);
11417 do_t_bkpt_hlt1 (63);
11423 do_t_bkpt_hlt1 (255);
11427 do_t_branch23 (void)
11429 set_it_insn_type_last ();
11430 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11432 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11433 this file. We used to simply ignore the PLT reloc type here --
11434 the branch encoding is now needed to deal with TLSCALL relocs.
11435 So if we see a PLT reloc now, put it back to how it used to be to
11436 keep the preexisting behaviour. */
11437 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_PLT32
)
11438 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11440 #if defined(OBJ_COFF)
11441 /* If the destination of the branch is a defined symbol which does not have
11442 the THUMB_FUNC attribute, then we must be calling a function which has
11443 the (interfacearm) attribute. We look for the Thumb entry point to that
11444 function and change the branch to refer to that function instead. */
11445 if ( inst
.relocs
[0].exp
.X_op
== O_symbol
11446 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11447 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11448 && ! THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11449 inst
.relocs
[0].exp
.X_add_symbol
11450 = find_real_start (inst
.relocs
[0].exp
.X_add_symbol
);
11457 set_it_insn_type_last ();
11458 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11459 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11460 should cause the alignment to be checked once it is known. This is
11461 because BX PC only works if the instruction is word aligned. */
11469 set_it_insn_type_last ();
11470 Rm
= inst
.operands
[0].reg
;
11471 reject_bad_reg (Rm
);
11472 inst
.instruction
|= Rm
<< 16;
11481 Rd
= inst
.operands
[0].reg
;
11482 Rm
= inst
.operands
[1].reg
;
11484 reject_bad_reg (Rd
);
11485 reject_bad_reg (Rm
);
11487 inst
.instruction
|= Rd
<< 8;
11488 inst
.instruction
|= Rm
<< 16;
11489 inst
.instruction
|= Rm
;
11495 set_it_insn_type (OUTSIDE_IT_INSN
);
11501 set_it_insn_type (OUTSIDE_IT_INSN
);
11502 inst
.instruction
|= inst
.operands
[0].imm
;
11508 set_it_insn_type (OUTSIDE_IT_INSN
);
11510 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11511 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11513 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11514 inst
.instruction
= 0xf3af8000;
11515 inst
.instruction
|= imod
<< 9;
11516 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11517 if (inst
.operands
[1].present
)
11518 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11522 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11523 && (inst
.operands
[0].imm
& 4),
11524 _("selected processor does not support 'A' form "
11525 "of this instruction"));
11526 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11527 _("Thumb does not support the 2-argument "
11528 "form of this instruction"));
11529 inst
.instruction
|= inst
.operands
[0].imm
;
11533 /* THUMB CPY instruction (argument parse). */
11538 if (inst
.size_req
== 4)
11540 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11541 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11542 inst
.instruction
|= inst
.operands
[1].reg
;
11546 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11547 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11548 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11555 set_it_insn_type (OUTSIDE_IT_INSN
);
11556 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11557 inst
.instruction
|= inst
.operands
[0].reg
;
11558 inst
.relocs
[0].pc_rel
= 1;
11559 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11565 inst
.instruction
|= inst
.operands
[0].imm
;
11571 unsigned Rd
, Rn
, Rm
;
11573 Rd
= inst
.operands
[0].reg
;
11574 Rn
= (inst
.operands
[1].present
11575 ? inst
.operands
[1].reg
: Rd
);
11576 Rm
= inst
.operands
[2].reg
;
11578 reject_bad_reg (Rd
);
11579 reject_bad_reg (Rn
);
11580 reject_bad_reg (Rm
);
11582 inst
.instruction
|= Rd
<< 8;
11583 inst
.instruction
|= Rn
<< 16;
11584 inst
.instruction
|= Rm
;
11590 if (unified_syntax
&& inst
.size_req
== 4)
11591 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11593 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11599 unsigned int cond
= inst
.operands
[0].imm
;
11601 set_it_insn_type (IT_INSN
);
11602 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11604 now_it
.warn_deprecated
= FALSE
;
11606 /* If the condition is a negative condition, invert the mask. */
11607 if ((cond
& 0x1) == 0x0)
11609 unsigned int mask
= inst
.instruction
& 0x000f;
11611 if ((mask
& 0x7) == 0)
11613 /* No conversion needed. */
11614 now_it
.block_length
= 1;
11616 else if ((mask
& 0x3) == 0)
11619 now_it
.block_length
= 2;
11621 else if ((mask
& 0x1) == 0)
11624 now_it
.block_length
= 3;
11629 now_it
.block_length
= 4;
11632 inst
.instruction
&= 0xfff0;
11633 inst
.instruction
|= mask
;
11636 inst
.instruction
|= cond
<< 4;
11639 /* Helper function used for both push/pop and ldm/stm. */
11641 encode_thumb2_multi (bfd_boolean do_io
, int base
, unsigned mask
,
11642 bfd_boolean writeback
)
11644 bfd_boolean load
, store
;
11646 gas_assert (base
!= -1 || !do_io
);
11647 load
= do_io
&& ((inst
.instruction
& (1 << 20)) != 0);
11648 store
= do_io
&& !load
;
11650 if (mask
& (1 << 13))
11651 inst
.error
= _("SP not allowed in register list");
11653 if (do_io
&& (mask
& (1 << base
)) != 0
11655 inst
.error
= _("having the base register in the register list when "
11656 "using write back is UNPREDICTABLE");
11660 if (mask
& (1 << 15))
11662 if (mask
& (1 << 14))
11663 inst
.error
= _("LR and PC should not both be in register list");
11665 set_it_insn_type_last ();
11670 if (mask
& (1 << 15))
11671 inst
.error
= _("PC not allowed in register list");
11674 if (do_io
&& ((mask
& (mask
- 1)) == 0))
11676 /* Single register transfers implemented as str/ldr. */
11679 if (inst
.instruction
& (1 << 23))
11680 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11682 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11686 if (inst
.instruction
& (1 << 23))
11687 inst
.instruction
= 0x00800000; /* ia -> [base] */
11689 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11692 inst
.instruction
|= 0xf8400000;
11694 inst
.instruction
|= 0x00100000;
11696 mask
= ffs (mask
) - 1;
11699 else if (writeback
)
11700 inst
.instruction
|= WRITE_BACK
;
11702 inst
.instruction
|= mask
;
11704 inst
.instruction
|= base
<< 16;
11710 /* This really doesn't seem worth it. */
11711 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
11712 _("expression too complex"));
11713 constraint (inst
.operands
[1].writeback
,
11714 _("Thumb load/store multiple does not support {reglist}^"));
11716 if (unified_syntax
)
11718 bfd_boolean narrow
;
11722 /* See if we can use a 16-bit instruction. */
11723 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11724 && inst
.size_req
!= 4
11725 && !(inst
.operands
[1].imm
& ~0xff))
11727 mask
= 1 << inst
.operands
[0].reg
;
11729 if (inst
.operands
[0].reg
<= 7)
11731 if (inst
.instruction
== T_MNEM_stmia
11732 ? inst
.operands
[0].writeback
11733 : (inst
.operands
[0].writeback
11734 == !(inst
.operands
[1].imm
& mask
)))
11736 if (inst
.instruction
== T_MNEM_stmia
11737 && (inst
.operands
[1].imm
& mask
)
11738 && (inst
.operands
[1].imm
& (mask
- 1)))
11739 as_warn (_("value stored for r%d is UNKNOWN"),
11740 inst
.operands
[0].reg
);
11742 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11743 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11744 inst
.instruction
|= inst
.operands
[1].imm
;
11747 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11749 /* This means 1 register in reg list one of 3 situations:
11750 1. Instruction is stmia, but without writeback.
11751 2. lmdia without writeback, but with Rn not in
11753 3. ldmia with writeback, but with Rn in reglist.
11754 Case 3 is UNPREDICTABLE behaviour, so we handle
11755 case 1 and 2 which can be converted into a 16-bit
11756 str or ldr. The SP cases are handled below. */
11757 unsigned long opcode
;
11758 /* First, record an error for Case 3. */
11759 if (inst
.operands
[1].imm
& mask
11760 && inst
.operands
[0].writeback
)
11762 _("having the base register in the register list when "
11763 "using write back is UNPREDICTABLE");
11765 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11767 inst
.instruction
= THUMB_OP16 (opcode
);
11768 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11769 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11773 else if (inst
.operands
[0] .reg
== REG_SP
)
11775 if (inst
.operands
[0].writeback
)
11778 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11779 ? T_MNEM_push
: T_MNEM_pop
);
11780 inst
.instruction
|= inst
.operands
[1].imm
;
11783 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11786 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11787 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11788 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11796 if (inst
.instruction
< 0xffff)
11797 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11799 encode_thumb2_multi (TRUE
/* do_io */, inst
.operands
[0].reg
,
11800 inst
.operands
[1].imm
,
11801 inst
.operands
[0].writeback
);
11806 constraint (inst
.operands
[0].reg
> 7
11807 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11808 constraint (inst
.instruction
!= T_MNEM_ldmia
11809 && inst
.instruction
!= T_MNEM_stmia
,
11810 _("Thumb-2 instruction only valid in unified syntax"));
11811 if (inst
.instruction
== T_MNEM_stmia
)
11813 if (!inst
.operands
[0].writeback
)
11814 as_warn (_("this instruction will write back the base register"));
11815 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11816 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11817 as_warn (_("value stored for r%d is UNKNOWN"),
11818 inst
.operands
[0].reg
);
11822 if (!inst
.operands
[0].writeback
11823 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11824 as_warn (_("this instruction will write back the base register"));
11825 else if (inst
.operands
[0].writeback
11826 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11827 as_warn (_("this instruction will not write back the base register"));
11830 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11831 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11832 inst
.instruction
|= inst
.operands
[1].imm
;
11839 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11840 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11841 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11842 || inst
.operands
[1].negative
,
11845 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11847 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11848 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11849 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11855 if (!inst
.operands
[1].present
)
11857 constraint (inst
.operands
[0].reg
== REG_LR
,
11858 _("r14 not allowed as first register "
11859 "when second register is omitted"));
11860 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11862 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11865 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11866 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11867 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11873 unsigned long opcode
;
11876 if (inst
.operands
[0].isreg
11877 && !inst
.operands
[0].preind
11878 && inst
.operands
[0].reg
== REG_PC
)
11879 set_it_insn_type_last ();
11881 opcode
= inst
.instruction
;
11882 if (unified_syntax
)
11884 if (!inst
.operands
[1].isreg
)
11886 if (opcode
<= 0xffff)
11887 inst
.instruction
= THUMB_OP32 (opcode
);
11888 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11891 if (inst
.operands
[1].isreg
11892 && !inst
.operands
[1].writeback
11893 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11894 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11895 && opcode
<= 0xffff
11896 && inst
.size_req
!= 4)
11898 /* Insn may have a 16-bit form. */
11899 Rn
= inst
.operands
[1].reg
;
11900 if (inst
.operands
[1].immisreg
)
11902 inst
.instruction
= THUMB_OP16 (opcode
);
11904 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11906 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11907 reject_bad_reg (inst
.operands
[1].imm
);
11909 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11910 && opcode
!= T_MNEM_ldrsb
)
11911 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11912 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11919 if (inst
.relocs
[0].pc_rel
)
11920 opcode
= T_MNEM_ldr_pc2
;
11922 opcode
= T_MNEM_ldr_pc
;
11926 if (opcode
== T_MNEM_ldr
)
11927 opcode
= T_MNEM_ldr_sp
;
11929 opcode
= T_MNEM_str_sp
;
11931 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11935 inst
.instruction
= inst
.operands
[0].reg
;
11936 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11938 inst
.instruction
|= THUMB_OP16 (opcode
);
11939 if (inst
.size_req
== 2)
11940 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11942 inst
.relax
= opcode
;
11946 /* Definitely a 32-bit variant. */
11948 /* Warning for Erratum 752419. */
11949 if (opcode
== T_MNEM_ldr
11950 && inst
.operands
[0].reg
== REG_SP
11951 && inst
.operands
[1].writeback
== 1
11952 && !inst
.operands
[1].immisreg
)
11954 if (no_cpu_selected ()
11955 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11956 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11957 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11958 as_warn (_("This instruction may be unpredictable "
11959 "if executed on M-profile cores "
11960 "with interrupts enabled."));
11963 /* Do some validations regarding addressing modes. */
11964 if (inst
.operands
[1].immisreg
)
11965 reject_bad_reg (inst
.operands
[1].imm
);
11967 constraint (inst
.operands
[1].writeback
== 1
11968 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11971 inst
.instruction
= THUMB_OP32 (opcode
);
11972 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11973 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11974 check_ldr_r15_aligned ();
11978 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11980 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11982 /* Only [Rn,Rm] is acceptable. */
11983 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11984 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11985 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11986 || inst
.operands
[1].negative
,
11987 _("Thumb does not support this addressing mode"));
11988 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11992 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11993 if (!inst
.operands
[1].isreg
)
11994 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11997 constraint (!inst
.operands
[1].preind
11998 || inst
.operands
[1].shifted
11999 || inst
.operands
[1].writeback
,
12000 _("Thumb does not support this addressing mode"));
12001 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
12003 constraint (inst
.instruction
& 0x0600,
12004 _("byte or halfword not valid for base register"));
12005 constraint (inst
.operands
[1].reg
== REG_PC
12006 && !(inst
.instruction
& THUMB_LOAD_BIT
),
12007 _("r15 based store not allowed"));
12008 constraint (inst
.operands
[1].immisreg
,
12009 _("invalid base register for register offset"));
12011 if (inst
.operands
[1].reg
== REG_PC
)
12012 inst
.instruction
= T_OPCODE_LDR_PC
;
12013 else if (inst
.instruction
& THUMB_LOAD_BIT
)
12014 inst
.instruction
= T_OPCODE_LDR_SP
;
12016 inst
.instruction
= T_OPCODE_STR_SP
;
12018 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12019 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12023 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
12024 if (!inst
.operands
[1].immisreg
)
12026 /* Immediate offset. */
12027 inst
.instruction
|= inst
.operands
[0].reg
;
12028 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12029 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12033 /* Register offset. */
12034 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
12035 constraint (inst
.operands
[1].negative
,
12036 _("Thumb does not support this addressing mode"));
12039 switch (inst
.instruction
)
12041 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
12042 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
12043 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
12044 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
12045 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
12046 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
12047 case 0x5600 /* ldrsb */:
12048 case 0x5e00 /* ldrsh */: break;
12052 inst
.instruction
|= inst
.operands
[0].reg
;
12053 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12054 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
12060 if (!inst
.operands
[1].present
)
12062 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12063 constraint (inst
.operands
[0].reg
== REG_LR
,
12064 _("r14 not allowed here"));
12065 constraint (inst
.operands
[0].reg
== REG_R12
,
12066 _("r12 not allowed here"));
12069 if (inst
.operands
[2].writeback
12070 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
12071 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
12072 as_warn (_("base register written back, and overlaps "
12073 "one of transfer registers"));
12075 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12076 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12077 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
12083 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12084 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
12090 unsigned Rd
, Rn
, Rm
, Ra
;
12092 Rd
= inst
.operands
[0].reg
;
12093 Rn
= inst
.operands
[1].reg
;
12094 Rm
= inst
.operands
[2].reg
;
12095 Ra
= inst
.operands
[3].reg
;
12097 reject_bad_reg (Rd
);
12098 reject_bad_reg (Rn
);
12099 reject_bad_reg (Rm
);
12100 reject_bad_reg (Ra
);
12102 inst
.instruction
|= Rd
<< 8;
12103 inst
.instruction
|= Rn
<< 16;
12104 inst
.instruction
|= Rm
;
12105 inst
.instruction
|= Ra
<< 12;
12111 unsigned RdLo
, RdHi
, Rn
, Rm
;
12113 RdLo
= inst
.operands
[0].reg
;
12114 RdHi
= inst
.operands
[1].reg
;
12115 Rn
= inst
.operands
[2].reg
;
12116 Rm
= inst
.operands
[3].reg
;
12118 reject_bad_reg (RdLo
);
12119 reject_bad_reg (RdHi
);
12120 reject_bad_reg (Rn
);
12121 reject_bad_reg (Rm
);
12123 inst
.instruction
|= RdLo
<< 12;
12124 inst
.instruction
|= RdHi
<< 8;
12125 inst
.instruction
|= Rn
<< 16;
12126 inst
.instruction
|= Rm
;
12130 do_t_mov_cmp (void)
12134 Rn
= inst
.operands
[0].reg
;
12135 Rm
= inst
.operands
[1].reg
;
12138 set_it_insn_type_last ();
12140 if (unified_syntax
)
12142 int r0off
= (inst
.instruction
== T_MNEM_mov
12143 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
12144 unsigned long opcode
;
12145 bfd_boolean narrow
;
12146 bfd_boolean low_regs
;
12148 low_regs
= (Rn
<= 7 && Rm
<= 7);
12149 opcode
= inst
.instruction
;
12150 if (in_it_block ())
12151 narrow
= opcode
!= T_MNEM_movs
;
12153 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
12154 if (inst
.size_req
== 4
12155 || inst
.operands
[1].shifted
)
12158 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12159 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
12160 && !inst
.operands
[1].shifted
12164 inst
.instruction
= T2_SUBS_PC_LR
;
12168 if (opcode
== T_MNEM_cmp
)
12170 constraint (Rn
== REG_PC
, BAD_PC
);
12173 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12175 warn_deprecated_sp (Rm
);
12176 /* R15 was documented as a valid choice for Rm in ARMv6,
12177 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12178 tools reject R15, so we do too. */
12179 constraint (Rm
== REG_PC
, BAD_PC
);
12182 reject_bad_reg (Rm
);
12184 else if (opcode
== T_MNEM_mov
12185 || opcode
== T_MNEM_movs
)
12187 if (inst
.operands
[1].isreg
)
12189 if (opcode
== T_MNEM_movs
)
12191 reject_bad_reg (Rn
);
12192 reject_bad_reg (Rm
);
12196 /* This is mov.n. */
12197 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
12198 && (Rm
== REG_SP
|| Rm
== REG_PC
))
12200 as_tsktsk (_("Use of r%u as a source register is "
12201 "deprecated when r%u is the destination "
12202 "register."), Rm
, Rn
);
12207 /* This is mov.w. */
12208 constraint (Rn
== REG_PC
, BAD_PC
);
12209 constraint (Rm
== REG_PC
, BAD_PC
);
12210 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12211 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
12215 reject_bad_reg (Rn
);
12218 if (!inst
.operands
[1].isreg
)
12220 /* Immediate operand. */
12221 if (!in_it_block () && opcode
== T_MNEM_mov
)
12223 if (low_regs
&& narrow
)
12225 inst
.instruction
= THUMB_OP16 (opcode
);
12226 inst
.instruction
|= Rn
<< 8;
12227 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12228 || inst
.relocs
[0].type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
12230 if (inst
.size_req
== 2)
12231 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12233 inst
.relax
= opcode
;
12238 constraint ((inst
.relocs
[0].type
12239 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
12240 && (inst
.relocs
[0].type
12241 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
12242 THUMB1_RELOC_ONLY
);
12244 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12245 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12246 inst
.instruction
|= Rn
<< r0off
;
12247 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12250 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
12251 && (inst
.instruction
== T_MNEM_mov
12252 || inst
.instruction
== T_MNEM_movs
))
12254 /* Register shifts are encoded as separate shift instructions. */
12255 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
12257 if (in_it_block ())
12262 if (inst
.size_req
== 4)
12265 if (!low_regs
|| inst
.operands
[1].imm
> 7)
12271 switch (inst
.operands
[1].shift_kind
)
12274 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
12277 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
12280 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
12283 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
12289 inst
.instruction
= opcode
;
12292 inst
.instruction
|= Rn
;
12293 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
12298 inst
.instruction
|= CONDS_BIT
;
12300 inst
.instruction
|= Rn
<< 8;
12301 inst
.instruction
|= Rm
<< 16;
12302 inst
.instruction
|= inst
.operands
[1].imm
;
12307 /* Some mov with immediate shift have narrow variants.
12308 Register shifts are handled above. */
12309 if (low_regs
&& inst
.operands
[1].shifted
12310 && (inst
.instruction
== T_MNEM_mov
12311 || inst
.instruction
== T_MNEM_movs
))
12313 if (in_it_block ())
12314 narrow
= (inst
.instruction
== T_MNEM_mov
);
12316 narrow
= (inst
.instruction
== T_MNEM_movs
);
12321 switch (inst
.operands
[1].shift_kind
)
12323 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12324 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12325 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12326 default: narrow
= FALSE
; break;
12332 inst
.instruction
|= Rn
;
12333 inst
.instruction
|= Rm
<< 3;
12334 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12338 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12339 inst
.instruction
|= Rn
<< r0off
;
12340 encode_thumb32_shifted_operand (1);
12344 switch (inst
.instruction
)
12347 /* In v4t or v5t a move of two lowregs produces unpredictable
12348 results. Don't allow this. */
12351 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12352 "MOV Rd, Rs with two low registers is not "
12353 "permitted on this architecture");
12354 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12358 inst
.instruction
= T_OPCODE_MOV_HR
;
12359 inst
.instruction
|= (Rn
& 0x8) << 4;
12360 inst
.instruction
|= (Rn
& 0x7);
12361 inst
.instruction
|= Rm
<< 3;
12365 /* We know we have low registers at this point.
12366 Generate LSLS Rd, Rs, #0. */
12367 inst
.instruction
= T_OPCODE_LSL_I
;
12368 inst
.instruction
|= Rn
;
12369 inst
.instruction
|= Rm
<< 3;
12375 inst
.instruction
= T_OPCODE_CMP_LR
;
12376 inst
.instruction
|= Rn
;
12377 inst
.instruction
|= Rm
<< 3;
12381 inst
.instruction
= T_OPCODE_CMP_HR
;
12382 inst
.instruction
|= (Rn
& 0x8) << 4;
12383 inst
.instruction
|= (Rn
& 0x7);
12384 inst
.instruction
|= Rm
<< 3;
12391 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12393 /* PR 10443: Do not silently ignore shifted operands. */
12394 constraint (inst
.operands
[1].shifted
,
12395 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12397 if (inst
.operands
[1].isreg
)
12399 if (Rn
< 8 && Rm
< 8)
12401 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12402 since a MOV instruction produces unpredictable results. */
12403 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12404 inst
.instruction
= T_OPCODE_ADD_I3
;
12406 inst
.instruction
= T_OPCODE_CMP_LR
;
12408 inst
.instruction
|= Rn
;
12409 inst
.instruction
|= Rm
<< 3;
12413 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12414 inst
.instruction
= T_OPCODE_MOV_HR
;
12416 inst
.instruction
= T_OPCODE_CMP_HR
;
12422 constraint (Rn
> 7,
12423 _("only lo regs allowed with immediate"));
12424 inst
.instruction
|= Rn
<< 8;
12425 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12436 top
= (inst
.instruction
& 0x00800000) != 0;
12437 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
)
12439 constraint (top
, _(":lower16: not allowed in this instruction"));
12440 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVW
;
12442 else if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
)
12444 constraint (!top
, _(":upper16: not allowed in this instruction"));
12445 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVT
;
12448 Rd
= inst
.operands
[0].reg
;
12449 reject_bad_reg (Rd
);
12451 inst
.instruction
|= Rd
<< 8;
12452 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
12454 imm
= inst
.relocs
[0].exp
.X_add_number
;
12455 inst
.instruction
|= (imm
& 0xf000) << 4;
12456 inst
.instruction
|= (imm
& 0x0800) << 15;
12457 inst
.instruction
|= (imm
& 0x0700) << 4;
12458 inst
.instruction
|= (imm
& 0x00ff);
12463 do_t_mvn_tst (void)
12467 Rn
= inst
.operands
[0].reg
;
12468 Rm
= inst
.operands
[1].reg
;
12470 if (inst
.instruction
== T_MNEM_cmp
12471 || inst
.instruction
== T_MNEM_cmn
)
12472 constraint (Rn
== REG_PC
, BAD_PC
);
12474 reject_bad_reg (Rn
);
12475 reject_bad_reg (Rm
);
12477 if (unified_syntax
)
12479 int r0off
= (inst
.instruction
== T_MNEM_mvn
12480 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12481 bfd_boolean narrow
;
12483 if (inst
.size_req
== 4
12484 || inst
.instruction
> 0xffff
12485 || inst
.operands
[1].shifted
12486 || Rn
> 7 || Rm
> 7)
12488 else if (inst
.instruction
== T_MNEM_cmn
12489 || inst
.instruction
== T_MNEM_tst
)
12491 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12492 narrow
= !in_it_block ();
12494 narrow
= in_it_block ();
12496 if (!inst
.operands
[1].isreg
)
12498 /* For an immediate, we always generate a 32-bit opcode;
12499 section relaxation will shrink it later if possible. */
12500 if (inst
.instruction
< 0xffff)
12501 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12502 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12503 inst
.instruction
|= Rn
<< r0off
;
12504 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12508 /* See if we can do this with a 16-bit instruction. */
12511 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12512 inst
.instruction
|= Rn
;
12513 inst
.instruction
|= Rm
<< 3;
12517 constraint (inst
.operands
[1].shifted
12518 && inst
.operands
[1].immisreg
,
12519 _("shift must be constant"));
12520 if (inst
.instruction
< 0xffff)
12521 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12522 inst
.instruction
|= Rn
<< r0off
;
12523 encode_thumb32_shifted_operand (1);
12529 constraint (inst
.instruction
> 0xffff
12530 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12531 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12532 _("unshifted register required"));
12533 constraint (Rn
> 7 || Rm
> 7,
12536 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12537 inst
.instruction
|= Rn
;
12538 inst
.instruction
|= Rm
<< 3;
12547 if (do_vfp_nsyn_mrs () == SUCCESS
)
12550 Rd
= inst
.operands
[0].reg
;
12551 reject_bad_reg (Rd
);
12552 inst
.instruction
|= Rd
<< 8;
12554 if (inst
.operands
[1].isreg
)
12556 unsigned br
= inst
.operands
[1].reg
;
12557 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12558 as_bad (_("bad register for mrs"));
12560 inst
.instruction
|= br
& (0xf << 16);
12561 inst
.instruction
|= (br
& 0x300) >> 4;
12562 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12566 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12568 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12570 /* PR gas/12698: The constraint is only applied for m_profile.
12571 If the user has specified -march=all, we want to ignore it as
12572 we are building for any CPU type, including non-m variants. */
12573 bfd_boolean m_profile
=
12574 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12575 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12576 "not support requested special purpose register"));
12579 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12581 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12582 _("'APSR', 'CPSR' or 'SPSR' expected"));
12584 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12585 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12586 inst
.instruction
|= 0xf0000;
12596 if (do_vfp_nsyn_msr () == SUCCESS
)
12599 constraint (!inst
.operands
[1].isreg
,
12600 _("Thumb encoding does not support an immediate here"));
12602 if (inst
.operands
[0].isreg
)
12603 flags
= (int)(inst
.operands
[0].reg
);
12605 flags
= inst
.operands
[0].imm
;
12607 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12609 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12611 /* PR gas/12698: The constraint is only applied for m_profile.
12612 If the user has specified -march=all, we want to ignore it as
12613 we are building for any CPU type, including non-m variants. */
12614 bfd_boolean m_profile
=
12615 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12616 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12617 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12618 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12619 && bits
!= PSR_f
)) && m_profile
,
12620 _("selected processor does not support requested special "
12621 "purpose register"));
12624 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12625 "requested special purpose register"));
12627 Rn
= inst
.operands
[1].reg
;
12628 reject_bad_reg (Rn
);
12630 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12631 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12632 inst
.instruction
|= (flags
& 0x300) >> 4;
12633 inst
.instruction
|= (flags
& 0xff);
12634 inst
.instruction
|= Rn
<< 16;
12640 bfd_boolean narrow
;
12641 unsigned Rd
, Rn
, Rm
;
12643 if (!inst
.operands
[2].present
)
12644 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12646 Rd
= inst
.operands
[0].reg
;
12647 Rn
= inst
.operands
[1].reg
;
12648 Rm
= inst
.operands
[2].reg
;
12650 if (unified_syntax
)
12652 if (inst
.size_req
== 4
12658 else if (inst
.instruction
== T_MNEM_muls
)
12659 narrow
= !in_it_block ();
12661 narrow
= in_it_block ();
12665 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12666 constraint (Rn
> 7 || Rm
> 7,
12673 /* 16-bit MULS/Conditional MUL. */
12674 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12675 inst
.instruction
|= Rd
;
12678 inst
.instruction
|= Rm
<< 3;
12680 inst
.instruction
|= Rn
<< 3;
12682 constraint (1, _("dest must overlap one source register"));
12686 constraint (inst
.instruction
!= T_MNEM_mul
,
12687 _("Thumb-2 MUL must not set flags"));
12689 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12690 inst
.instruction
|= Rd
<< 8;
12691 inst
.instruction
|= Rn
<< 16;
12692 inst
.instruction
|= Rm
<< 0;
12694 reject_bad_reg (Rd
);
12695 reject_bad_reg (Rn
);
12696 reject_bad_reg (Rm
);
12703 unsigned RdLo
, RdHi
, Rn
, Rm
;
12705 RdLo
= inst
.operands
[0].reg
;
12706 RdHi
= inst
.operands
[1].reg
;
12707 Rn
= inst
.operands
[2].reg
;
12708 Rm
= inst
.operands
[3].reg
;
12710 reject_bad_reg (RdLo
);
12711 reject_bad_reg (RdHi
);
12712 reject_bad_reg (Rn
);
12713 reject_bad_reg (Rm
);
12715 inst
.instruction
|= RdLo
<< 12;
12716 inst
.instruction
|= RdHi
<< 8;
12717 inst
.instruction
|= Rn
<< 16;
12718 inst
.instruction
|= Rm
;
12721 as_tsktsk (_("rdhi and rdlo must be different"));
12727 set_it_insn_type (NEUTRAL_IT_INSN
);
12729 if (unified_syntax
)
12731 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12733 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12734 inst
.instruction
|= inst
.operands
[0].imm
;
12738 /* PR9722: Check for Thumb2 availability before
12739 generating a thumb2 nop instruction. */
12740 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12742 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12743 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12746 inst
.instruction
= 0x46c0;
12751 constraint (inst
.operands
[0].present
,
12752 _("Thumb does not support NOP with hints"));
12753 inst
.instruction
= 0x46c0;
12760 if (unified_syntax
)
12762 bfd_boolean narrow
;
12764 if (THUMB_SETS_FLAGS (inst
.instruction
))
12765 narrow
= !in_it_block ();
12767 narrow
= in_it_block ();
12768 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12770 if (inst
.size_req
== 4)
12775 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12776 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12777 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12781 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12782 inst
.instruction
|= inst
.operands
[0].reg
;
12783 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12788 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12790 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12792 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12793 inst
.instruction
|= inst
.operands
[0].reg
;
12794 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12803 Rd
= inst
.operands
[0].reg
;
12804 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12806 reject_bad_reg (Rd
);
12807 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12808 reject_bad_reg (Rn
);
12810 inst
.instruction
|= Rd
<< 8;
12811 inst
.instruction
|= Rn
<< 16;
12813 if (!inst
.operands
[2].isreg
)
12815 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12816 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12822 Rm
= inst
.operands
[2].reg
;
12823 reject_bad_reg (Rm
);
12825 constraint (inst
.operands
[2].shifted
12826 && inst
.operands
[2].immisreg
,
12827 _("shift must be constant"));
12828 encode_thumb32_shifted_operand (2);
12835 unsigned Rd
, Rn
, Rm
;
12837 Rd
= inst
.operands
[0].reg
;
12838 Rn
= inst
.operands
[1].reg
;
12839 Rm
= inst
.operands
[2].reg
;
12841 reject_bad_reg (Rd
);
12842 reject_bad_reg (Rn
);
12843 reject_bad_reg (Rm
);
12845 inst
.instruction
|= Rd
<< 8;
12846 inst
.instruction
|= Rn
<< 16;
12847 inst
.instruction
|= Rm
;
12848 if (inst
.operands
[3].present
)
12850 unsigned int val
= inst
.relocs
[0].exp
.X_add_number
;
12851 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
12852 _("expression too complex"));
12853 inst
.instruction
|= (val
& 0x1c) << 10;
12854 inst
.instruction
|= (val
& 0x03) << 6;
12861 if (!inst
.operands
[3].present
)
12865 inst
.instruction
&= ~0x00000020;
12867 /* PR 10168. Swap the Rm and Rn registers. */
12868 Rtmp
= inst
.operands
[1].reg
;
12869 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12870 inst
.operands
[2].reg
= Rtmp
;
12878 if (inst
.operands
[0].immisreg
)
12879 reject_bad_reg (inst
.operands
[0].imm
);
12881 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12885 do_t_push_pop (void)
12889 constraint (inst
.operands
[0].writeback
,
12890 _("push/pop do not support {reglist}^"));
12891 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
12892 _("expression too complex"));
12894 mask
= inst
.operands
[0].imm
;
12895 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12896 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12897 else if (inst
.size_req
!= 4
12898 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
12899 ? REG_LR
: REG_PC
)))
12901 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12902 inst
.instruction
|= THUMB_PP_PC_LR
;
12903 inst
.instruction
|= mask
& 0xff;
12905 else if (unified_syntax
)
12907 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12908 encode_thumb2_multi (TRUE
/* do_io */, 13, mask
, TRUE
);
12912 inst
.error
= _("invalid register list to push/pop instruction");
12920 if (unified_syntax
)
12921 encode_thumb2_multi (FALSE
/* do_io */, -1, inst
.operands
[0].imm
, FALSE
);
12924 inst
.error
= _("invalid register list to push/pop instruction");
12930 do_t_vscclrm (void)
12932 if (inst
.operands
[0].issingle
)
12934 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1) << 22;
12935 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1e) << 11;
12936 inst
.instruction
|= inst
.operands
[0].imm
;
12940 inst
.instruction
|= (inst
.operands
[0].reg
& 0x10) << 18;
12941 inst
.instruction
|= (inst
.operands
[0].reg
& 0xf) << 12;
12942 inst
.instruction
|= 1 << 8;
12943 inst
.instruction
|= inst
.operands
[0].imm
<< 1;
12952 Rd
= inst
.operands
[0].reg
;
12953 Rm
= inst
.operands
[1].reg
;
12955 reject_bad_reg (Rd
);
12956 reject_bad_reg (Rm
);
12958 inst
.instruction
|= Rd
<< 8;
12959 inst
.instruction
|= Rm
<< 16;
12960 inst
.instruction
|= Rm
;
12968 Rd
= inst
.operands
[0].reg
;
12969 Rm
= inst
.operands
[1].reg
;
12971 reject_bad_reg (Rd
);
12972 reject_bad_reg (Rm
);
12974 if (Rd
<= 7 && Rm
<= 7
12975 && inst
.size_req
!= 4)
12977 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12978 inst
.instruction
|= Rd
;
12979 inst
.instruction
|= Rm
<< 3;
12981 else if (unified_syntax
)
12983 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12984 inst
.instruction
|= Rd
<< 8;
12985 inst
.instruction
|= Rm
<< 16;
12986 inst
.instruction
|= Rm
;
12989 inst
.error
= BAD_HIREG
;
12997 Rd
= inst
.operands
[0].reg
;
12998 Rm
= inst
.operands
[1].reg
;
13000 reject_bad_reg (Rd
);
13001 reject_bad_reg (Rm
);
13003 inst
.instruction
|= Rd
<< 8;
13004 inst
.instruction
|= Rm
;
13012 Rd
= inst
.operands
[0].reg
;
13013 Rs
= (inst
.operands
[1].present
13014 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
13015 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
13017 reject_bad_reg (Rd
);
13018 reject_bad_reg (Rs
);
13019 if (inst
.operands
[2].isreg
)
13020 reject_bad_reg (inst
.operands
[2].reg
);
13022 inst
.instruction
|= Rd
<< 8;
13023 inst
.instruction
|= Rs
<< 16;
13024 if (!inst
.operands
[2].isreg
)
13026 bfd_boolean narrow
;
13028 if ((inst
.instruction
& 0x00100000) != 0)
13029 narrow
= !in_it_block ();
13031 narrow
= in_it_block ();
13033 if (Rd
> 7 || Rs
> 7)
13036 if (inst
.size_req
== 4 || !unified_syntax
)
13039 if (inst
.relocs
[0].exp
.X_op
!= O_constant
13040 || inst
.relocs
[0].exp
.X_add_number
!= 0)
13043 /* Turn rsb #0 into 16-bit neg. We should probably do this via
13044 relaxation, but it doesn't seem worth the hassle. */
13047 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13048 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
13049 inst
.instruction
|= Rs
<< 3;
13050 inst
.instruction
|= Rd
;
13054 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13055 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13059 encode_thumb32_shifted_operand (2);
13065 if (warn_on_deprecated
13066 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13067 as_tsktsk (_("setend use is deprecated for ARMv8"));
13069 set_it_insn_type (OUTSIDE_IT_INSN
);
13070 if (inst
.operands
[0].imm
)
13071 inst
.instruction
|= 0x8;
13077 if (!inst
.operands
[1].present
)
13078 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
13080 if (unified_syntax
)
13082 bfd_boolean narrow
;
13085 switch (inst
.instruction
)
13088 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
13090 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
13092 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
13094 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
13098 if (THUMB_SETS_FLAGS (inst
.instruction
))
13099 narrow
= !in_it_block ();
13101 narrow
= in_it_block ();
13102 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13104 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
13106 if (inst
.operands
[2].isreg
13107 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
13108 || inst
.operands
[2].reg
> 7))
13110 if (inst
.size_req
== 4)
13113 reject_bad_reg (inst
.operands
[0].reg
);
13114 reject_bad_reg (inst
.operands
[1].reg
);
13118 if (inst
.operands
[2].isreg
)
13120 reject_bad_reg (inst
.operands
[2].reg
);
13121 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13122 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13123 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13124 inst
.instruction
|= inst
.operands
[2].reg
;
13126 /* PR 12854: Error on extraneous shifts. */
13127 constraint (inst
.operands
[2].shifted
,
13128 _("extraneous shift as part of operand to shift insn"));
13132 inst
.operands
[1].shifted
= 1;
13133 inst
.operands
[1].shift_kind
= shift_kind
;
13134 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
13135 ? T_MNEM_movs
: T_MNEM_mov
);
13136 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13137 encode_thumb32_shifted_operand (1);
13138 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
13139 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13144 if (inst
.operands
[2].isreg
)
13146 switch (shift_kind
)
13148 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13149 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13150 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13151 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13155 inst
.instruction
|= inst
.operands
[0].reg
;
13156 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13158 /* PR 12854: Error on extraneous shifts. */
13159 constraint (inst
.operands
[2].shifted
,
13160 _("extraneous shift as part of operand to shift insn"));
13164 switch (shift_kind
)
13166 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13167 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13168 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13171 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13172 inst
.instruction
|= inst
.operands
[0].reg
;
13173 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13179 constraint (inst
.operands
[0].reg
> 7
13180 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
13181 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13183 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
13185 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
13186 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13187 _("source1 and dest must be same register"));
13189 switch (inst
.instruction
)
13191 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13192 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13193 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13194 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13198 inst
.instruction
|= inst
.operands
[0].reg
;
13199 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13201 /* PR 12854: Error on extraneous shifts. */
13202 constraint (inst
.operands
[2].shifted
,
13203 _("extraneous shift as part of operand to shift insn"));
13207 switch (inst
.instruction
)
13209 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13210 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13211 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13212 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
13215 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13216 inst
.instruction
|= inst
.operands
[0].reg
;
13217 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13225 unsigned Rd
, Rn
, Rm
;
13227 Rd
= inst
.operands
[0].reg
;
13228 Rn
= inst
.operands
[1].reg
;
13229 Rm
= inst
.operands
[2].reg
;
13231 reject_bad_reg (Rd
);
13232 reject_bad_reg (Rn
);
13233 reject_bad_reg (Rm
);
13235 inst
.instruction
|= Rd
<< 8;
13236 inst
.instruction
|= Rn
<< 16;
13237 inst
.instruction
|= Rm
;
13243 unsigned Rd
, Rn
, Rm
;
13245 Rd
= inst
.operands
[0].reg
;
13246 Rm
= inst
.operands
[1].reg
;
13247 Rn
= inst
.operands
[2].reg
;
13249 reject_bad_reg (Rd
);
13250 reject_bad_reg (Rn
);
13251 reject_bad_reg (Rm
);
13253 inst
.instruction
|= Rd
<< 8;
13254 inst
.instruction
|= Rn
<< 16;
13255 inst
.instruction
|= Rm
;
13261 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13262 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
13263 _("SMC is not permitted on this architecture"));
13264 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13265 _("expression too complex"));
13266 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13267 inst
.instruction
|= (value
& 0xf000) >> 12;
13268 inst
.instruction
|= (value
& 0x0ff0);
13269 inst
.instruction
|= (value
& 0x000f) << 16;
13270 /* PR gas/15623: SMC instructions must be last in an IT block. */
13271 set_it_insn_type_last ();
13277 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13279 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13280 inst
.instruction
|= (value
& 0x0fff);
13281 inst
.instruction
|= (value
& 0xf000) << 4;
13285 do_t_ssat_usat (int bias
)
13289 Rd
= inst
.operands
[0].reg
;
13290 Rn
= inst
.operands
[2].reg
;
13292 reject_bad_reg (Rd
);
13293 reject_bad_reg (Rn
);
13295 inst
.instruction
|= Rd
<< 8;
13296 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
13297 inst
.instruction
|= Rn
<< 16;
13299 if (inst
.operands
[3].present
)
13301 offsetT shift_amount
= inst
.relocs
[0].exp
.X_add_number
;
13303 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13305 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13306 _("expression too complex"));
13308 if (shift_amount
!= 0)
13310 constraint (shift_amount
> 31,
13311 _("shift expression is too large"));
13313 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
13314 inst
.instruction
|= 0x00200000; /* sh bit. */
13316 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
13317 inst
.instruction
|= (shift_amount
& 0x03) << 6;
/* Encode Thumb-2 SSAT: the saturate width is 1-based, so bias = 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13333 Rd
= inst
.operands
[0].reg
;
13334 Rn
= inst
.operands
[2].reg
;
13336 reject_bad_reg (Rd
);
13337 reject_bad_reg (Rn
);
13339 inst
.instruction
|= Rd
<< 8;
13340 inst
.instruction
|= inst
.operands
[1].imm
- 1;
13341 inst
.instruction
|= Rn
<< 16;
13347 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
13348 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
13349 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
13350 || inst
.operands
[2].negative
,
13353 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13355 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13356 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13357 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13358 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13364 if (!inst
.operands
[2].present
)
13365 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13367 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13368 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13369 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13372 inst
.instruction
|= inst
.operands
[0].reg
;
13373 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13374 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13375 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13381 unsigned Rd
, Rn
, Rm
;
13383 Rd
= inst
.operands
[0].reg
;
13384 Rn
= inst
.operands
[1].reg
;
13385 Rm
= inst
.operands
[2].reg
;
13387 reject_bad_reg (Rd
);
13388 reject_bad_reg (Rn
);
13389 reject_bad_reg (Rm
);
13391 inst
.instruction
|= Rd
<< 8;
13392 inst
.instruction
|= Rn
<< 16;
13393 inst
.instruction
|= Rm
;
13394 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13402 Rd
= inst
.operands
[0].reg
;
13403 Rm
= inst
.operands
[1].reg
;
13405 reject_bad_reg (Rd
);
13406 reject_bad_reg (Rm
);
13408 if (inst
.instruction
<= 0xffff
13409 && inst
.size_req
!= 4
13410 && Rd
<= 7 && Rm
<= 7
13411 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13413 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13414 inst
.instruction
|= Rd
;
13415 inst
.instruction
|= Rm
<< 3;
13417 else if (unified_syntax
)
13419 if (inst
.instruction
<= 0xffff)
13420 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13421 inst
.instruction
|= Rd
<< 8;
13422 inst
.instruction
|= Rm
;
13423 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13427 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13428 _("Thumb encoding does not support rotation"));
13429 constraint (1, BAD_HIREG
);
13436 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
13445 half
= (inst
.instruction
& 0x10) != 0;
13446 set_it_insn_type_last ();
13447 constraint (inst
.operands
[0].immisreg
,
13448 _("instruction requires register index"));
13450 Rn
= inst
.operands
[0].reg
;
13451 Rm
= inst
.operands
[0].imm
;
13453 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13454 constraint (Rn
== REG_SP
, BAD_SP
);
13455 reject_bad_reg (Rm
);
13457 constraint (!half
&& inst
.operands
[0].shifted
,
13458 _("instruction does not allow shifted index"));
13459 inst
.instruction
|= (Rn
<< 16) | Rm
;
13465 if (!inst
.operands
[0].present
)
13466 inst
.operands
[0].imm
= 0;
13468 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13470 constraint (inst
.size_req
== 2,
13471 _("immediate value out of range"));
13472 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13473 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13474 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13478 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13479 inst
.instruction
|= inst
.operands
[0].imm
;
13482 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Encode Thumb-2 USAT: the saturate width is 0-based, so bias = 0.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13497 Rd
= inst
.operands
[0].reg
;
13498 Rn
= inst
.operands
[2].reg
;
13500 reject_bad_reg (Rd
);
13501 reject_bad_reg (Rn
);
13503 inst
.instruction
|= Rd
<< 8;
13504 inst
.instruction
|= inst
.operands
[1].imm
;
13505 inst
.instruction
|= Rn
<< 16;
13508 /* Checking the range of the branch offset (VAL) with NBITS bits
13509 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13511 v8_1_branch_value_check (int val
, int nbits
, int is_signed
)
13513 gas_assert (nbits
> 0 && nbits
<= 32);
13516 int cmp
= (1 << (nbits
- 1));
13517 if ((val
< -cmp
) || (val
>= cmp
) || (val
& 0x01))
13522 if ((val
<= 0) || (val
>= (1 << nbits
)) || (val
& 0x1))
13528 /* For branches in Armv8.1-M Mainline. */
13530 do_t_branch_future (void)
13532 unsigned long insn
= inst
.instruction
;
13534 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13535 if (inst
.operands
[0].hasreloc
== 0)
13537 if (v8_1_branch_value_check (inst
.operands
[0].imm
, 5, FALSE
) == FAIL
)
13538 as_bad (BAD_BRANCH_OFF
);
13540 inst
.instruction
|= ((inst
.operands
[0].imm
& 0x1f) >> 1) << 23;
13544 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH5
;
13545 inst
.relocs
[0].pc_rel
= 1;
13551 if (inst
.operands
[1].hasreloc
== 0)
13553 int val
= inst
.operands
[1].imm
;
13554 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 17, TRUE
) == FAIL
)
13555 as_bad (BAD_BRANCH_OFF
);
13557 int immA
= (val
& 0x0001f000) >> 12;
13558 int immB
= (val
& 0x00000ffc) >> 2;
13559 int immC
= (val
& 0x00000002) >> 1;
13560 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13564 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF17
;
13565 inst
.relocs
[1].pc_rel
= 1;
13570 if (inst
.operands
[1].hasreloc
== 0)
13572 int val
= inst
.operands
[1].imm
;
13573 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 19, TRUE
) == FAIL
)
13574 as_bad (BAD_BRANCH_OFF
);
13576 int immA
= (val
& 0x0007f000) >> 12;
13577 int immB
= (val
& 0x00000ffc) >> 2;
13578 int immC
= (val
& 0x00000002) >> 1;
13579 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13583 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF19
;
13584 inst
.relocs
[1].pc_rel
= 1;
13588 case T_MNEM_bfcsel
:
13590 if (inst
.operands
[1].hasreloc
== 0)
13592 int val
= inst
.operands
[1].imm
;
13593 int immA
= (val
& 0x00001000) >> 12;
13594 int immB
= (val
& 0x00000ffc) >> 2;
13595 int immC
= (val
& 0x00000002) >> 1;
13596 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13600 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF13
;
13601 inst
.relocs
[1].pc_rel
= 1;
13605 if (inst
.operands
[2].hasreloc
== 0)
13607 constraint ((inst
.operands
[0].hasreloc
!= 0), BAD_ARGS
);
13608 int val2
= inst
.operands
[2].imm
;
13609 int val0
= inst
.operands
[0].imm
& 0x1f;
13610 int diff
= val2
- val0
;
13612 inst
.instruction
|= 1 << 17; /* T bit. */
13613 else if (diff
!= 2)
13614 as_bad (_("out of range label-relative fixup value"));
13618 constraint ((inst
.operands
[0].hasreloc
== 0), BAD_ARGS
);
13619 inst
.relocs
[2].type
= BFD_RELOC_THUMB_PCREL_BFCSEL
;
13620 inst
.relocs
[2].pc_rel
= 1;
13624 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
13625 inst
.instruction
|= (inst
.operands
[3].imm
& 0xf) << 18;
13630 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13637 /* Helper function for do_t_loloop to handle relocations. */
13639 v8_1_loop_reloc (int is_le
)
13641 if (inst
.relocs
[0].exp
.X_op
== O_constant
)
13643 int value
= inst
.relocs
[0].exp
.X_add_number
;
13644 value
= (is_le
) ? -value
: value
;
13646 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
13647 as_bad (BAD_BRANCH_OFF
);
13651 immh
= (value
& 0x00000ffc) >> 2;
13652 imml
= (value
& 0x00000002) >> 1;
13654 inst
.instruction
|= (imml
<< 11) | (immh
<< 1);
13658 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_LOOP12
;
13659 inst
.relocs
[0].pc_rel
= 1;
13663 /* To handle the Scalar Low Overhead Loop instructions
13664 in Armv8.1-M Mainline. */
13668 unsigned long insn
= inst
.instruction
;
13670 set_it_insn_type (OUTSIDE_IT_INSN
);
13671 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13677 if (!inst
.operands
[0].present
)
13678 inst
.instruction
|= 1 << 21;
13680 v8_1_loop_reloc (TRUE
);
13684 v8_1_loop_reloc (FALSE
);
13685 /* Fall through. */
13687 constraint (inst
.operands
[1].isreg
!= 1, BAD_ARGS
);
13688 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB                                    \
  X(vabd,     0x0000700, 0x1200d00, N_INV),             \
  X(vmax,     0x0000600, 0x0000f00, N_INV),             \
  X(vmin,     0x0000610, 0x0200f00, N_INV),             \
  X(vpadd,    0x0000b10, 0x1000d00, N_INV),             \
  X(vpmax,    0x0000a00, 0x1000f00, N_INV),             \
  X(vpmin,    0x0000a10, 0x1200f00, N_INV),             \
  X(vadd,     0x0000800, 0x0000d00, N_INV),             \
  X(vsub,     0x1000800, 0x0200d00, N_INV),             \
  X(vceq,     0x1000810, 0x0000e00, 0x1b10100),         \
  X(vcge,     0x0000310, 0x1000e00, 0x1b10080),         \
  X(vcgt,     0x0000300, 0x1200e00, 0x1b10000),         \
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */        \
  X(vclt,     0x0000300, 0x1200e00, 0x1b10200),         \
  X(vcle,     0x0000310, 0x1000e00, 0x1b10180),         \
  X(vfma,     N_INV, 0x0000c10, N_INV),                 \
  X(vfms,     N_INV, 0x0200c10, N_INV),                 \
  X(vmla,     0x0000900, 0x0000d10, 0x0800040),         \
  X(vmls,     0x1000900, 0x0200d10, 0x0800440),         \
  X(vmul,     0x0000910, 0x1000d10, 0x0800840),         \
  X(vmull,    0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,    0x0800800, N_INV,     0x0800240),         \
  X(vmlsl,    0x0800a00, N_INV,     0x0800640),         \
  X(vqdmlal,  0x0800900, N_INV,     0x0800340),         \
  X(vqdmlsl,  0x0800b00, N_INV,     0x0800740),         \
  X(vqdmull,  0x0800d00, N_INV,     0x0800b40),         \
  X(vqdmulh,  0x0000b00, N_INV,     0x0800c40),         \
  X(vqrdmulh, 0x1000b00, N_INV,     0x0800d40),         \
  X(vqrdmlah, 0x3000b10, N_INV,     0x0800e40),         \
  X(vqrdmlsh, 0x3000c10, N_INV,     0x0800f40),         \
  X(vshl,     0x0000400, N_INV,     0x0800510),         \
  X(vqshl,    0x0000410, N_INV,     0x0800710),         \
  X(vand,     0x0000110, N_INV,     0x0800030),         \
  X(vbic,     0x0100110, N_INV,     0x0800030),         \
  X(veor,     0x1000110, N_INV,     N_INV),             \
  X(vorn,     0x0300110, N_INV,     0x0800010),         \
  X(vorr,     0x0200110, N_INV,     0x0800010),         \
  X(vmvn,     0x1b00580, N_INV,     0x0800030),         \
  X(vshll,    0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,     0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,     0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,     0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,     0x0000000, 0x0800000, N_INV),             \
  X(vld2,     0x0200100, 0x0a00100, 0x0a00d00),         \
  X(vst2,     0x0000100, 0x0800100, N_INV),             \
  X(vld3,     0x0200200, 0x0a00200, 0x0a00e00),         \
  X(vst3,     0x0000200, 0x0800200, N_INV),             \
  X(vld4,     0x0200300, 0x0a00300, 0x0a00f00),         \
  X(vst4,     0x0000300, 0x0800300, N_INV),             \
  X(vmovn,    0x1b20200, N_INV,     N_INV),             \
  X(vtrn,     0x1b20080, N_INV,     N_INV),             \
  X(vqmovn,   0x1b20200, N_INV,     N_INV),             \
  X(vqmovun,  0x1b20240, N_INV,     N_INV),             \
  X(vnmul,    0xe200a40, 0xe200b40, N_INV),             \
  X(vnmla,    0xe100a40, 0xe100b40, N_INV),             \
  X(vnmls,    0xe100a00, 0xe100b00, N_INV),             \
  X(vfnma,    0xe900a40, 0xe900b40, N_INV),             \
  X(vfnms,    0xe900a00, 0xe900b00, N_INV),             \
  X(vcmp,     0xeb40a40, 0xeb40b40, N_INV),             \
  X(vcmpz,    0xeb50a40, 0xeb50b40, N_INV),             \
  X(vcmpe,    0xeb40ac0, 0xeb40bc0, N_INV),             \
  X(vcmpez,   0xeb50ac0, 0xeb50bc0, N_INV),             \
  X(vseleq,   0xe000a00, N_INV,     N_INV),             \
  X(vselvs,   0xe100a00, N_INV,     N_INV),             \
  X(vselge,   0xe200a00, N_INV,     N_INV),             \
  X(vselgt,   0xe300a00, N_INV,     N_INV),             \
  X(vmaxnm,   0xe800a00, 0x3000f10, N_INV),             \
  X(vminnm,   0xe800a40, 0x3200f10, N_INV),             \
  X(vcvta,    0xebc0a40, 0x3bb0000, N_INV),             \
  X(vrintr,   0xeb60a40, 0x3ba0400, N_INV),             \
  X(vrinta,   0xeb80a40, 0x3ba0400, N_INV),             \
  X(aes,      0x3b00300, N_INV,     N_INV),             \
  X(sha3op,   0x2000c00, N_INV,     N_INV),             \
  X(sha1h,    0x3b902c0, N_INV,     N_INV),             \
  X(sha2op,   0x3ba0380, N_INV,     N_INV)

/* Expand the table once as an enumeration of mnemonic codes ...  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* ... and once as the encoding table indexed by those codes.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13837 /* Define shapes for instruction operands. The following mnemonic characters
13838 are used in this table:
13840 F - VFP S<n> register
13841 D - Neon D<n> register
13842 Q - Neon Q<n> register
13846 L - D<n> register list
13848 This table is used to generate various data:
13849 - enumerations of the form NS_DDR to be used as arguments to
13851 - a table classifying shapes into single, double, quad, mixed.
13852 - a table used to drive neon_select_shape. */
13854 #define NEON_SHAPE_DEF \
13855 X(3, (D, D, D), DOUBLE), \
13856 X(3, (Q, Q, Q), QUAD), \
13857 X(3, (D, D, I), DOUBLE), \
13858 X(3, (Q, Q, I), QUAD), \
13859 X(3, (D, D, S), DOUBLE), \
13860 X(3, (Q, Q, S), QUAD), \
13861 X(2, (D, D), DOUBLE), \
13862 X(2, (Q, Q), QUAD), \
13863 X(2, (D, S), DOUBLE), \
13864 X(2, (Q, S), QUAD), \
13865 X(2, (D, R), DOUBLE), \
13866 X(2, (Q, R), QUAD), \
13867 X(2, (D, I), DOUBLE), \
13868 X(2, (Q, I), QUAD), \
13869 X(3, (D, L, D), DOUBLE), \
13870 X(2, (D, Q), MIXED), \
13871 X(2, (Q, D), MIXED), \
13872 X(3, (D, Q, I), MIXED), \
13873 X(3, (Q, D, I), MIXED), \
13874 X(3, (Q, D, D), MIXED), \
13875 X(3, (D, Q, Q), MIXED), \
13876 X(3, (Q, Q, D), MIXED), \
13877 X(3, (Q, D, S), MIXED), \
13878 X(3, (D, Q, S), MIXED), \
13879 X(4, (D, D, D, I), DOUBLE), \
13880 X(4, (Q, Q, Q, I), QUAD), \
13881 X(4, (D, D, S, I), DOUBLE), \
13882 X(4, (Q, Q, S, I), QUAD), \
13883 X(2, (F, F), SINGLE), \
13884 X(3, (F, F, F), SINGLE), \
13885 X(2, (F, I), SINGLE), \
13886 X(2, (F, D), MIXED), \
13887 X(2, (D, F), MIXED), \
13888 X(3, (F, F, I), MIXED), \
13889 X(4, (R, R, F, F), SINGLE), \
13890 X(4, (F, F, R, R), SINGLE), \
13891 X(3, (D, R, R), DOUBLE), \
13892 X(3, (R, R, D), DOUBLE), \
13893 X(2, (S, R), SINGLE), \
13894 X(2, (R, S), SINGLE), \
13895 X(2, (F, R), SINGLE), \
13896 X(2, (R, F), SINGLE), \
13897 /* Half float shape supported so far. */\
13898 X (2, (H, D), MIXED), \
13899 X (2, (D, H), MIXED), \
13900 X (2, (H, F), MIXED), \
13901 X (2, (F, H), MIXED), \
13902 X (2, (H, H), HALF), \
13903 X (2, (H, R), HALF), \
13904 X (2, (R, H), HALF), \
13905 X (2, (H, I), HALF), \
13906 X (3, (H, H, H), HALF), \
13907 X (3, (H, F, I), MIXED), \
13908 X (3, (F, H, I), MIXED), \
13909 X (3, (D, H, H), MIXED), \
13910 X (3, (D, H, S), MIXED)
13912 #define S2(A,B) NS_##A##B
13913 #define S3(A,B,C) NS_##A##B##C
13914 #define S4(A,B,C,D) NS_##A##B##C##D
13916 #define X(N, L, C) S##N L
13929 enum neon_shape_class
13938 #define X(N, L, C) SC_##C
13940 static enum neon_shape_class neon_shape_class
[] =
13959 /* Register widths of above. */
13960 static unsigned neon_shape_el_size
[] =
13972 struct neon_shape_info
13975 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13978 #define S2(A,B) { SE_##A, SE_##B }
13979 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13980 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13982 #define X(N, L, C) { N, S##N L }
13984 static struct neon_shape_info neon_shape_tab
[] =
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
14055 /* Select a "shape" for the current instruction (describing register types or
14056 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14057 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14058 function of operand parsing, so this function doesn't need to be called.
14059 Shapes should be listed in order of decreasing length. */
14061 static enum neon_shape
14062 neon_select_shape (enum neon_shape shape
, ...)
14065 enum neon_shape first_shape
= shape
;
14067 /* Fix missing optional operands. FIXME: we don't know at this point how
14068 many arguments we should have, so this makes the assumption that we have
14069 > 1. This is true of all current Neon opcodes, I think, but may not be
14070 true in the future. */
14071 if (!inst
.operands
[1].present
)
14072 inst
.operands
[1] = inst
.operands
[0];
14074 va_start (ap
, shape
);
14076 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
14081 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
14083 if (!inst
.operands
[j
].present
)
14089 switch (neon_shape_tab
[shape
].el
[j
])
14091 /* If a .f16, .16, .u16, .s16 type specifier is given over
14092 a VFP single precision register operand, it's essentially
14093 means only half of the register is used.
14095 If the type specifier is given after the mnemonics, the
14096 information is stored in inst.vectype. If the type specifier
14097 is given after register operand, the information is stored
14098 in inst.operands[].vectype.
14100 When there is only one type specifier, and all the register
14101 operands are the same type of hardware register, the type
14102 specifier applies to all register operands.
14104 If no type specifier is given, the shape is inferred from
14105 operand information.
14108 vadd.f16 s0, s1, s2: NS_HHH
14109 vabs.f16 s0, s1: NS_HH
14110 vmov.f16 s0, r1: NS_HR
14111 vmov.f16 r0, s1: NS_RH
14112 vcvt.f16 r0, s1: NS_RH
14113 vcvt.f16.s32 s2, s2, #29: NS_HFI
14114 vcvt.f16.s32 s2, s2: NS_HF
14117 if (!(inst
.operands
[j
].isreg
14118 && inst
.operands
[j
].isvec
14119 && inst
.operands
[j
].issingle
14120 && !inst
.operands
[j
].isquad
14121 && ((inst
.vectype
.elems
== 1
14122 && inst
.vectype
.el
[0].size
== 16)
14123 || (inst
.vectype
.elems
> 1
14124 && inst
.vectype
.el
[j
].size
== 16)
14125 || (inst
.vectype
.elems
== 0
14126 && inst
.operands
[j
].vectype
.type
!= NT_invtype
14127 && inst
.operands
[j
].vectype
.size
== 16))))
14132 if (!(inst
.operands
[j
].isreg
14133 && inst
.operands
[j
].isvec
14134 && inst
.operands
[j
].issingle
14135 && !inst
.operands
[j
].isquad
14136 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
14137 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
14138 || (inst
.vectype
.elems
== 0
14139 && (inst
.operands
[j
].vectype
.size
== 32
14140 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
14145 if (!(inst
.operands
[j
].isreg
14146 && inst
.operands
[j
].isvec
14147 && !inst
.operands
[j
].isquad
14148 && !inst
.operands
[j
].issingle
))
14153 if (!(inst
.operands
[j
].isreg
14154 && !inst
.operands
[j
].isvec
))
14159 if (!(inst
.operands
[j
].isreg
14160 && inst
.operands
[j
].isvec
14161 && inst
.operands
[j
].isquad
14162 && !inst
.operands
[j
].issingle
))
14167 if (!(!inst
.operands
[j
].isreg
14168 && !inst
.operands
[j
].isscalar
))
14173 if (!(!inst
.operands
[j
].isreg
14174 && inst
.operands
[j
].isscalar
))
14184 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
14185 /* We've matched all the entries in the shape table, and we don't
14186 have any left over operands which have not been matched. */
14192 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
14193 first_error (_("invalid instruction shape"));
14198 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14199 means the Q bit should be set). */
14202 neon_quad (enum neon_shape shape
)
14204 return neon_shape_class
[shape
] == SC_QUAD
;
14208 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
14211 /* Allow modification to be made to types which are constrained to be
14212 based on the key element, based on bits set alongside N_EQK. */
14213 if ((typebits
& N_EQK
) != 0)
14215 if ((typebits
& N_HLF
) != 0)
14217 else if ((typebits
& N_DBL
) != 0)
14219 if ((typebits
& N_SGN
) != 0)
14220 *g_type
= NT_signed
;
14221 else if ((typebits
& N_UNS
) != 0)
14222 *g_type
= NT_unsigned
;
14223 else if ((typebits
& N_INT
) != 0)
14224 *g_type
= NT_integer
;
14225 else if ((typebits
& N_FLT
) != 0)
14226 *g_type
= NT_float
;
14227 else if ((typebits
& N_SIZ
) != 0)
14228 *g_type
= NT_untyped
;
14232 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14233 operand type, i.e. the single type specified in a Neon instruction when it
14234 is the only one given. */
14236 static struct neon_type_el
14237 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
14239 struct neon_type_el dest
= *key
;
14241 gas_assert ((thisarg
& N_EQK
) != 0);
14243 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
14248 /* Convert Neon type and size into compact bitmask representation. */
14250 static enum neon_type_mask
14251 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
14258 case 8: return N_8
;
14259 case 16: return N_16
;
14260 case 32: return N_32
;
14261 case 64: return N_64
;
14269 case 8: return N_I8
;
14270 case 16: return N_I16
;
14271 case 32: return N_I32
;
14272 case 64: return N_I64
;
14280 case 16: return N_F16
;
14281 case 32: return N_F32
;
14282 case 64: return N_F64
;
14290 case 8: return N_P8
;
14291 case 16: return N_P16
;
14292 case 64: return N_P64
;
14300 case 8: return N_S8
;
14301 case 16: return N_S16
;
14302 case 32: return N_S32
;
14303 case 64: return N_S64
;
14311 case 8: return N_U8
;
14312 case 16: return N_U16
;
14313 case 32: return N_U32
;
14314 case 64: return N_U64
;
14325 /* Convert compact Neon bitmask type representation to a type and size. Only
14326 handles the case where a single bit is set in the mask. */
14329 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
14330 enum neon_type_mask mask
)
14332 if ((mask
& N_EQK
) != 0)
14335 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
14337 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
14339 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
14341 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
14346 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
14348 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
14349 *type
= NT_unsigned
;
14350 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
14351 *type
= NT_integer
;
14352 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
14353 *type
= NT_untyped
;
14354 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
14356 else if ((mask
& (N_F_ALL
)) != 0)
14364 /* Modify a bitmask of allowed types. This is only needed for type
14368 modify_types_allowed (unsigned allowed
, unsigned mods
)
14371 enum neon_el_type type
;
14377 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
14379 if (el_type_of_type_chk (&type
, &size
,
14380 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
14382 neon_modify_type_size (mods
, &type
, &size
);
14383 destmask
|= type_chk_of_el_type (type
, size
);
14390 /* Check type and return type classification.
14391 The manual states (paraphrase): If one datatype is given, it indicates the
14393 - the second operand, if there is one
14394 - the operand, if there is no second operand
14395 - the result, if there are no operands.
14396 This isn't quite good enough though, so we use a concept of a "key" datatype
14397 which is set on a per-instruction basis, which is the one which matters when
14398 only one data type is written.
14399 Note: this function has side-effects (e.g. filling in missing operands). All
14400 Neon instructions should call it before performing bit encoding. */
14402 static struct neon_type_el
14403 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
14406 unsigned i
, pass
, key_el
= 0;
14407 unsigned types
[NEON_MAX_TYPE_ELS
];
14408 enum neon_el_type k_type
= NT_invtype
;
14409 unsigned k_size
= -1u;
14410 struct neon_type_el badtype
= {NT_invtype
, -1};
14411 unsigned key_allowed
= 0;
14413 /* Optional registers in Neon instructions are always (not) in operand 1.
14414 Fill in the missing operand here, if it was omitted. */
14415 if (els
> 1 && !inst
.operands
[1].present
)
14416 inst
.operands
[1] = inst
.operands
[0];
14418 /* Suck up all the varargs. */
14420 for (i
= 0; i
< els
; i
++)
14422 unsigned thisarg
= va_arg (ap
, unsigned);
14423 if (thisarg
== N_IGNORE_TYPE
)
14428 types
[i
] = thisarg
;
14429 if ((thisarg
& N_KEY
) != 0)
14434 if (inst
.vectype
.elems
> 0)
14435 for (i
= 0; i
< els
; i
++)
14436 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
14438 first_error (_("types specified in both the mnemonic and operands"));
14442 /* Duplicate inst.vectype elements here as necessary.
14443 FIXME: No idea if this is exactly the same as the ARM assembler,
14444 particularly when an insn takes one register and one non-register
14446 if (inst
.vectype
.elems
== 1 && els
> 1)
14449 inst
.vectype
.elems
= els
;
14450 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
14451 for (j
= 0; j
< els
; j
++)
14453 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14456 else if (inst
.vectype
.elems
== 0 && els
> 0)
14459 /* No types were given after the mnemonic, so look for types specified
14460 after each operand. We allow some flexibility here; as long as the
14461 "key" operand has a type, we can infer the others. */
14462 for (j
= 0; j
< els
; j
++)
14463 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
14464 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
14466 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
14468 for (j
= 0; j
< els
; j
++)
14469 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
14470 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14475 first_error (_("operand types can't be inferred"));
14479 else if (inst
.vectype
.elems
!= els
)
14481 first_error (_("type specifier has the wrong number of parts"));
14485 for (pass
= 0; pass
< 2; pass
++)
14487 for (i
= 0; i
< els
; i
++)
14489 unsigned thisarg
= types
[i
];
14490 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
14491 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
14492 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
14493 unsigned g_size
= inst
.vectype
.el
[i
].size
;
14495 /* Decay more-specific signed & unsigned types to sign-insensitive
14496 integer types if sign-specific variants are unavailable. */
14497 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
14498 && (types_allowed
& N_SU_ALL
) == 0)
14499 g_type
= NT_integer
;
14501 /* If only untyped args are allowed, decay any more specific types to
14502 them. Some instructions only care about signs for some element
14503 sizes, so handle that properly. */
14504 if (((types_allowed
& N_UNT
) == 0)
14505 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
14506 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
14507 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
14508 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
14509 g_type
= NT_untyped
;
14513 if ((thisarg
& N_KEY
) != 0)
14517 key_allowed
= thisarg
& ~N_KEY
;
14519 /* Check architecture constraint on FP16 extension. */
14521 && k_type
== NT_float
14522 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14524 inst
.error
= _(BAD_FP16
);
14531 if ((thisarg
& N_VFP
) != 0)
14533 enum neon_shape_el regshape
;
14534 unsigned regwidth
, match
;
14536 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14539 first_error (_("invalid instruction shape"));
14542 regshape
= neon_shape_tab
[ns
].el
[i
];
14543 regwidth
= neon_shape_el_size
[regshape
];
14545 /* In VFP mode, operands must match register widths. If we
14546 have a key operand, use its width, else use the width of
14547 the current operand. */
14553 /* FP16 will use a single precision register. */
14554 if (regwidth
== 32 && match
== 16)
14556 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14560 inst
.error
= _(BAD_FP16
);
14565 if (regwidth
!= match
)
14567 first_error (_("operand size must match register width"));
14572 if ((thisarg
& N_EQK
) == 0)
14574 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14576 if ((given_type
& types_allowed
) == 0)
14578 first_error (_("bad type in Neon instruction"));
14584 enum neon_el_type mod_k_type
= k_type
;
14585 unsigned mod_k_size
= k_size
;
14586 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14587 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14589 first_error (_("inconsistent types in Neon instruction"));
14597 return inst
.vectype
.el
[key_el
];
14600 /* Neon-style VFP instruction forwarding. */
14602 /* Thumb VFP instructions have 0xE in the condition field. */
14605 do_vfp_cond_or_thumb (void)
14610 inst
.instruction
|= 0xe0000000;
14612 inst
.instruction
|= inst
.cond
<< 28;
14615 /* Look up and encode a simple mnemonic, for use as a helper function for the
14616 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14617 etc. It is assumed that operand parsing has already been done, and that the
14618 operands are in the form expected by the given opcode (this isn't necessarily
14619 the same as the form in which they were parsed, hence some massaging must
14620 take place before this function is called).
14621 Checks current arch version against that in the looked-up opcode. */
14624 do_vfp_nsyn_opcode (const char *opname
)
14626 const struct asm_opcode
*opcode
;
14628 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14633 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14634 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14641 inst
.instruction
= opcode
->tvalue
;
14642 opcode
->tencode ();
14646 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14647 opcode
->aencode ();
14652 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14654 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14656 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14659 do_vfp_nsyn_opcode ("fadds");
14661 do_vfp_nsyn_opcode ("fsubs");
14663 /* ARMv8.2 fp16 instruction. */
14665 do_scalar_fp16_v82_encode ();
14670 do_vfp_nsyn_opcode ("faddd");
14672 do_vfp_nsyn_opcode ("fsubd");
14676 /* Check operand types to see if this is a VFP instruction, and if so call
14680 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14682 enum neon_shape rs
;
14683 struct neon_type_el et
;
14688 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14689 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14693 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14694 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14695 N_F_ALL
| N_KEY
| N_VFP
);
14702 if (et
.type
!= NT_invtype
)
14713 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14715 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14717 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14720 do_vfp_nsyn_opcode ("fmacs");
14722 do_vfp_nsyn_opcode ("fnmacs");
14724 /* ARMv8.2 fp16 instruction. */
14726 do_scalar_fp16_v82_encode ();
14731 do_vfp_nsyn_opcode ("fmacd");
14733 do_vfp_nsyn_opcode ("fnmacd");
14738 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14740 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14742 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14745 do_vfp_nsyn_opcode ("ffmas");
14747 do_vfp_nsyn_opcode ("ffnmas");
14749 /* ARMv8.2 fp16 instruction. */
14751 do_scalar_fp16_v82_encode ();
14756 do_vfp_nsyn_opcode ("ffmad");
14758 do_vfp_nsyn_opcode ("ffnmad");
14763 do_vfp_nsyn_mul (enum neon_shape rs
)
14765 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14767 do_vfp_nsyn_opcode ("fmuls");
14769 /* ARMv8.2 fp16 instruction. */
14771 do_scalar_fp16_v82_encode ();
14774 do_vfp_nsyn_opcode ("fmuld");
14778 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14780 int is_neg
= (inst
.instruction
& 0x80) != 0;
14781 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14783 if (rs
== NS_FF
|| rs
== NS_HH
)
14786 do_vfp_nsyn_opcode ("fnegs");
14788 do_vfp_nsyn_opcode ("fabss");
14790 /* ARMv8.2 fp16 instruction. */
14792 do_scalar_fp16_v82_encode ();
14797 do_vfp_nsyn_opcode ("fnegd");
14799 do_vfp_nsyn_opcode ("fabsd");
14803 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14804 insns belong to Neon, and are handled elsewhere. */
14807 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14809 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14813 do_vfp_nsyn_opcode ("fldmdbs");
14815 do_vfp_nsyn_opcode ("fldmias");
14820 do_vfp_nsyn_opcode ("fstmdbs");
14822 do_vfp_nsyn_opcode ("fstmias");
14827 do_vfp_nsyn_sqrt (void)
14829 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14830 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14832 if (rs
== NS_FF
|| rs
== NS_HH
)
14834 do_vfp_nsyn_opcode ("fsqrts");
14836 /* ARMv8.2 fp16 instruction. */
14838 do_scalar_fp16_v82_encode ();
14841 do_vfp_nsyn_opcode ("fsqrtd");
14845 do_vfp_nsyn_div (void)
14847 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14848 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14849 N_F_ALL
| N_KEY
| N_VFP
);
14851 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14853 do_vfp_nsyn_opcode ("fdivs");
14855 /* ARMv8.2 fp16 instruction. */
14857 do_scalar_fp16_v82_encode ();
14860 do_vfp_nsyn_opcode ("fdivd");
14864 do_vfp_nsyn_nmul (void)
14866 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14867 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14868 N_F_ALL
| N_KEY
| N_VFP
);
14870 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14872 NEON_ENCODE (SINGLE
, inst
);
14873 do_vfp_sp_dyadic ();
14875 /* ARMv8.2 fp16 instruction. */
14877 do_scalar_fp16_v82_encode ();
14881 NEON_ENCODE (DOUBLE
, inst
);
14882 do_vfp_dp_rd_rn_rm ();
14884 do_vfp_cond_or_thumb ();
14889 do_vfp_nsyn_cmp (void)
14891 enum neon_shape rs
;
14892 if (inst
.operands
[1].isreg
)
14894 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14895 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14897 if (rs
== NS_FF
|| rs
== NS_HH
)
14899 NEON_ENCODE (SINGLE
, inst
);
14900 do_vfp_sp_monadic ();
14904 NEON_ENCODE (DOUBLE
, inst
);
14905 do_vfp_dp_rd_rm ();
14910 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
14911 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
14913 switch (inst
.instruction
& 0x0fffffff)
14916 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14919 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14925 if (rs
== NS_FI
|| rs
== NS_HI
)
14927 NEON_ENCODE (SINGLE
, inst
);
14928 do_vfp_sp_compare_z ();
14932 NEON_ENCODE (DOUBLE
, inst
);
14936 do_vfp_cond_or_thumb ();
14938 /* ARMv8.2 fp16 instruction. */
14939 if (rs
== NS_HI
|| rs
== NS_HH
)
14940 do_scalar_fp16_v82_encode ();
14944 nsyn_insert_sp (void)
14946 inst
.operands
[1] = inst
.operands
[0];
14947 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14948 inst
.operands
[0].reg
= REG_SP
;
14949 inst
.operands
[0].isreg
= 1;
14950 inst
.operands
[0].writeback
= 1;
14951 inst
.operands
[0].present
= 1;
14955 do_vfp_nsyn_push (void)
14959 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14960 _("register list must contain at least 1 and at most 16 "
14963 if (inst
.operands
[1].issingle
)
14964 do_vfp_nsyn_opcode ("fstmdbs");
14966 do_vfp_nsyn_opcode ("fstmdbd");
14970 do_vfp_nsyn_pop (void)
14974 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14975 _("register list must contain at least 1 and at most 16 "
14978 if (inst
.operands
[1].issingle
)
14979 do_vfp_nsyn_opcode ("fldmias");
14981 do_vfp_nsyn_opcode ("fldmiad");
14984 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14985 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14988 neon_dp_fixup (struct arm_it
* insn
)
14990 unsigned int i
= insn
->instruction
;
14995 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
15006 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3).  Implemented as a portable
   least-significant-set-bit scan (equivalent to POSIX ffs (x) - 4,
   without requiring <strings.h>).  */

static unsigned
neon_logbits (unsigned x)
{
  /* POS is the 1-based index of the lowest set bit, or 0 when X == 0,
     matching ffs() semantics exactly.  */
  unsigned pos = 0;

  if (x != 0)
    {
      pos = 1;
      while ((x & 1) == 0)
	{
	  x >>= 1;
	  pos++;
	}
    }

  return pos - 4;
}
15018 #define LOW4(R) ((R) & 0xf)
15019 #define HI1(R) (((R) >> 4) & 1)
15021 /* Encode insns with bit pattern:
15023 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15024 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15026 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15027 different meaning for some instruction. */
15030 neon_three_same (int isquad
, int ubit
, int size
)
15032 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15033 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15034 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15035 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15036 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15037 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15038 inst
.instruction
|= (isquad
!= 0) << 6;
15039 inst
.instruction
|= (ubit
!= 0) << 24;
15041 inst
.instruction
|= neon_logbits (size
) << 20;
15043 neon_dp_fixup (&inst
);
15046 /* Encode instructions of the form:
15048 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15049 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15051 Don't write size if SIZE == -1. */
15054 neon_two_same (int qbit
, int ubit
, int size
)
15056 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15057 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15058 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15059 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15060 inst
.instruction
|= (qbit
!= 0) << 6;
15061 inst
.instruction
|= (ubit
!= 0) << 24;
15064 inst
.instruction
|= neon_logbits (size
) << 18;
15066 neon_dp_fixup (&inst
);
15069 /* Neon instruction encoders, in approximate order of appearance. */
15072 do_neon_dyadic_i_su (void)
15074 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15075 struct neon_type_el et
= neon_check_type (3, rs
,
15076 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
15077 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15081 do_neon_dyadic_i64_su (void)
15083 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15084 struct neon_type_el et
= neon_check_type (3, rs
,
15085 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15086 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15090 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
15093 unsigned size
= et
.size
>> 3;
15094 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15095 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15096 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15097 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15098 inst
.instruction
|= (isquad
!= 0) << 6;
15099 inst
.instruction
|= immbits
<< 16;
15100 inst
.instruction
|= (size
>> 3) << 7;
15101 inst
.instruction
|= (size
& 0x7) << 19;
15103 inst
.instruction
|= (uval
!= 0) << 24;
15105 neon_dp_fixup (&inst
);
15109 do_neon_shl_imm (void)
15111 if (!inst
.operands
[2].isreg
)
15113 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15114 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
15115 int imm
= inst
.operands
[2].imm
;
15117 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15118 _("immediate out of range for shift"));
15119 NEON_ENCODE (IMMED
, inst
);
15120 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15124 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15125 struct neon_type_el et
= neon_check_type (3, rs
,
15126 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15129 /* VSHL/VQSHL 3-register variants have syntax such as:
15131 whereas other 3-register operations encoded by neon_three_same have
15134 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
15136 tmp
= inst
.operands
[2].reg
;
15137 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15138 inst
.operands
[1].reg
= tmp
;
15139 NEON_ENCODE (INTEGER
, inst
);
15140 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15145 do_neon_qshl_imm (void)
15147 if (!inst
.operands
[2].isreg
)
15149 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15150 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
15151 int imm
= inst
.operands
[2].imm
;
15153 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15154 _("immediate out of range for shift"));
15155 NEON_ENCODE (IMMED
, inst
);
15156 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
15160 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15161 struct neon_type_el et
= neon_check_type (3, rs
,
15162 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15165 /* See note in do_neon_shl_imm. */
15166 tmp
= inst
.operands
[2].reg
;
15167 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15168 inst
.operands
[1].reg
= tmp
;
15169 NEON_ENCODE (INTEGER
, inst
);
15170 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15175 do_neon_rshl (void)
15177 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15178 struct neon_type_el et
= neon_check_type (3, rs
,
15179 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15182 tmp
= inst
.operands
[2].reg
;
15183 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15184 inst
.operands
[1].reg
= tmp
;
15185 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15189 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
15191 /* Handle .I8 pseudo-instructions. */
15194 /* Unfortunately, this will make everything apart from zero out-of-range.
15195 FIXME is this the intended semantics? There doesn't seem much point in
15196 accepting .I8 if so. */
15197 immediate
|= immediate
<< 8;
15203 if (immediate
== (immediate
& 0x000000ff))
15205 *immbits
= immediate
;
15208 else if (immediate
== (immediate
& 0x0000ff00))
15210 *immbits
= immediate
>> 8;
15213 else if (immediate
== (immediate
& 0x00ff0000))
15215 *immbits
= immediate
>> 16;
15218 else if (immediate
== (immediate
& 0xff000000))
15220 *immbits
= immediate
>> 24;
15223 if ((immediate
& 0xffff) != (immediate
>> 16))
15224 goto bad_immediate
;
15225 immediate
&= 0xffff;
15228 if (immediate
== (immediate
& 0x000000ff))
15230 *immbits
= immediate
;
15233 else if (immediate
== (immediate
& 0x0000ff00))
15235 *immbits
= immediate
>> 8;
15240 first_error (_("immediate value out of range"));
15245 do_neon_logic (void)
15247 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
15249 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15250 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15251 /* U bit and size field were set as part of the bitmask. */
15252 NEON_ENCODE (INTEGER
, inst
);
15253 neon_three_same (neon_quad (rs
), 0, -1);
15257 const int three_ops_form
= (inst
.operands
[2].present
15258 && !inst
.operands
[2].isreg
);
15259 const int immoperand
= (three_ops_form
? 2 : 1);
15260 enum neon_shape rs
= (three_ops_form
15261 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
15262 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
15263 struct neon_type_el et
= neon_check_type (2, rs
,
15264 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15265 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
15269 if (et
.type
== NT_invtype
)
15272 if (three_ops_form
)
15273 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15274 _("first and second operands shall be the same register"));
15276 NEON_ENCODE (IMMED
, inst
);
15278 immbits
= inst
.operands
[immoperand
].imm
;
15281 /* .i64 is a pseudo-op, so the immediate must be a repeating
15283 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
15284 inst
.operands
[immoperand
].reg
: 0))
15286 /* Set immbits to an invalid constant. */
15287 immbits
= 0xdeadbeef;
15294 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15298 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15302 /* Pseudo-instruction for VBIC. */
15303 neon_invert_size (&immbits
, 0, et
.size
);
15304 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15308 /* Pseudo-instruction for VORR. */
15309 neon_invert_size (&immbits
, 0, et
.size
);
15310 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15320 inst
.instruction
|= neon_quad (rs
) << 6;
15321 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15322 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15323 inst
.instruction
|= cmode
<< 8;
15324 neon_write_immbits (immbits
);
15326 neon_dp_fixup (&inst
);
15331 do_neon_bitfield (void)
15333 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15334 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15335 neon_three_same (neon_quad (rs
), 0, -1);
15339 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
15342 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15343 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
15345 if (et
.type
== NT_float
)
15347 NEON_ENCODE (FLOAT
, inst
);
15348 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15352 NEON_ENCODE (INTEGER
, inst
);
15353 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
15358 do_neon_dyadic_if_su (void)
15360 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15364 do_neon_dyadic_if_su_d (void)
15366 /* This version only allow D registers, but that constraint is enforced during
15367 operand parsing so we don't need to do anything extra here. */
15368 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15372 do_neon_dyadic_if_i_d (void)
15374 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15375 affected if we specify unsigned args. */
15376 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15379 enum vfp_or_neon_is_neon_bits
15382 NEON_CHECK_ARCH
= 2,
15383 NEON_CHECK_ARCH8
= 4
15386 /* Call this function if an instruction which may have belonged to the VFP or
15387 Neon instruction sets, but turned out to be a Neon instruction (due to the
15388 operand types involved, etc.). We have to check and/or fix-up a couple of
15391 - Make sure the user hasn't attempted to make a Neon instruction
15393 - Alter the value in the condition code field if necessary.
15394 - Make sure that the arch supports Neon instructions.
15396 Which of these operations take place depends on bits from enum
15397 vfp_or_neon_is_neon_bits.
15399 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15400 current instruction's condition is COND_ALWAYS, the condition field is
15401 changed to inst.uncond_value. This is necessary because instructions shared
15402 between VFP and Neon may be conditional for the VFP variants only, and the
15403 unconditional Neon version must have, e.g., 0xF in the condition field. */
15406 vfp_or_neon_is_neon (unsigned check
)
15408 /* Conditions are always legal in Thumb mode (IT blocks). */
15409 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
15411 if (inst
.cond
!= COND_ALWAYS
)
15413 first_error (_(BAD_COND
));
15416 if (inst
.uncond_value
!= -1)
15417 inst
.instruction
|= inst
.uncond_value
<< 28;
15420 if ((check
& NEON_CHECK_ARCH
)
15421 && !mark_feature_used (&fpu_neon_ext_v1
))
15423 first_error (_(BAD_FPU
));
15427 if ((check
& NEON_CHECK_ARCH8
)
15428 && !mark_feature_used (&fpu_neon_ext_armv8
))
15430 first_error (_(BAD_FPU
));
15438 do_neon_addsub_if_i (void)
15440 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
15443 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15446 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15447 affected if we specify unsigned args. */
15448 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
15451 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15453 V<op> A,B (A is operand 0, B is operand 2)
15458 so handle that case specially. */
15461 neon_exchange_operands (void)
15463 if (inst
.operands
[1].present
)
15465 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
15467 /* Swap operands[1] and operands[2]. */
15468 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
15469 inst
.operands
[1] = inst
.operands
[2];
15470 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
15475 inst
.operands
[1] = inst
.operands
[2];
15476 inst
.operands
[2] = inst
.operands
[0];
15481 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
15483 if (inst
.operands
[2].isreg
)
15486 neon_exchange_operands ();
15487 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
15491 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15492 struct neon_type_el et
= neon_check_type (2, rs
,
15493 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
15495 NEON_ENCODE (IMMED
, inst
);
15496 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15497 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15498 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15499 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15500 inst
.instruction
|= neon_quad (rs
) << 6;
15501 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15502 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15504 neon_dp_fixup (&inst
);
15511 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
15515 do_neon_cmp_inv (void)
15517 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
15523 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
15526 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
15527 scalars, which are encoded in 5 bits, M : Rm.
15528 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
15529 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
15532 Dot Product instructions are similar to multiply instructions except elsize
15533 should always be 32.
15535 This function translates SCALAR, which is GAS's internal encoding of indexed
15536 scalar register, to raw encoding. There is also register and index range
15537 check based on ELSIZE. */
/* Translate SCALAR, GAS's internal encoding of an indexed scalar
   register, to the raw M:Rm encoding used by multiply-by-scalar
   instructions, checking register and index ranges against ELSIZE
   (16-bit scalars: Rm[2:0] + index in M:Rm[3]; 32-bit scalars:
   Rm[3:0] + index in M).  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    bad_scalar:
    default:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
15565 /* Encode multiply / multiply-accumulate scalar instructions. */
15568 neon_mul_mac (struct neon_type_el et
, int ubit
)
15572 /* Give a more helpful error message if we have an invalid type. */
15573 if (et
.type
== NT_invtype
)
15576 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
15577 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15578 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15579 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15580 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15581 inst
.instruction
|= LOW4 (scalar
);
15582 inst
.instruction
|= HI1 (scalar
) << 5;
15583 inst
.instruction
|= (et
.type
== NT_float
) << 8;
15584 inst
.instruction
|= neon_logbits (et
.size
) << 20;
15585 inst
.instruction
|= (ubit
!= 0) << 24;
15587 neon_dp_fixup (&inst
);
15591 do_neon_mac_maybe_scalar (void)
15593 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
15596 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15599 if (inst
.operands
[2].isscalar
)
15601 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15602 struct neon_type_el et
= neon_check_type (3, rs
,
15603 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
15604 NEON_ENCODE (SCALAR
, inst
);
15605 neon_mul_mac (et
, neon_quad (rs
));
15609 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15610 affected if we specify unsigned args. */
15611 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15616 do_neon_fmac (void)
15618 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
15621 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15624 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15630 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15631 struct neon_type_el et
= neon_check_type (3, rs
,
15632 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15633 neon_three_same (neon_quad (rs
), 0, et
.size
);
15636 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15637 same types as the MAC equivalents. The polynomial type for this instruction
15638 is encoded the same as the integer type. */
15643 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
15646 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15649 if (inst
.operands
[2].isscalar
)
15650 do_neon_mac_maybe_scalar ();
15652 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
15656 do_neon_qdmulh (void)
15658 if (inst
.operands
[2].isscalar
)
15660 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15661 struct neon_type_el et
= neon_check_type (3, rs
,
15662 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15663 NEON_ENCODE (SCALAR
, inst
);
15664 neon_mul_mac (et
, neon_quad (rs
));
15668 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15669 struct neon_type_el et
= neon_check_type (3, rs
,
15670 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15671 NEON_ENCODE (INTEGER
, inst
);
15672 /* The U bit (rounding) comes from bit mask. */
15673 neon_three_same (neon_quad (rs
), 0, et
.size
);
15678 do_neon_qrdmlah (void)
15680 /* Check we're on the correct architecture. */
15681 if (!mark_feature_used (&fpu_neon_ext_armv8
))
15683 _("instruction form not available on this architecture.");
15684 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
15686 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15687 record_feature_use (&fpu_neon_ext_v8_1
);
15690 if (inst
.operands
[2].isscalar
)
15692 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15693 struct neon_type_el et
= neon_check_type (3, rs
,
15694 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15695 NEON_ENCODE (SCALAR
, inst
);
15696 neon_mul_mac (et
, neon_quad (rs
));
15700 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15701 struct neon_type_el et
= neon_check_type (3, rs
,
15702 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15703 NEON_ENCODE (INTEGER
, inst
);
15704 /* The U bit (rounding) comes from bit mask. */
15705 neon_three_same (neon_quad (rs
), 0, et
.size
);
15710 do_neon_fcmp_absolute (void)
15712 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15713 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15714 N_F_16_32
| N_KEY
);
15715 /* Size field comes from bit mask. */
15716 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
15720 do_neon_fcmp_absolute_inv (void)
15722 neon_exchange_operands ();
15723 do_neon_fcmp_absolute ();
15727 do_neon_step (void)
15729 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15730 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15731 N_F_16_32
| N_KEY
);
15732 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15736 do_neon_abs_neg (void)
15738 enum neon_shape rs
;
15739 struct neon_type_el et
;
15741 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
15744 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15747 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15748 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
15750 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15751 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15752 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15753 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15754 inst
.instruction
|= neon_quad (rs
) << 6;
15755 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15756 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15758 neon_dp_fixup (&inst
);
15764 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15765 struct neon_type_el et
= neon_check_type (2, rs
,
15766 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15767 int imm
= inst
.operands
[2].imm
;
15768 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15769 _("immediate out of range for insert"));
15770 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15776 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15777 struct neon_type_el et
= neon_check_type (2, rs
,
15778 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15779 int imm
= inst
.operands
[2].imm
;
15780 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15781 _("immediate out of range for insert"));
15782 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
15786 do_neon_qshlu_imm (void)
15788 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15789 struct neon_type_el et
= neon_check_type (2, rs
,
15790 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
15791 int imm
= inst
.operands
[2].imm
;
15792 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15793 _("immediate out of range for shift"));
15794 /* Only encodes the 'U present' variant of the instruction.
15795 In this case, signed types have OP (bit 8) set to 0.
15796 Unsigned types have OP set to 1. */
15797 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
15798 /* The rest of the bits are the same as other immediate shifts. */
15799 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15803 do_neon_qmovn (void)
15805 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15806 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15807 /* Saturating move where operands can be signed or unsigned, and the
15808 destination has the same signedness. */
15809 NEON_ENCODE (INTEGER
, inst
);
15810 if (et
.type
== NT_unsigned
)
15811 inst
.instruction
|= 0xc0;
15813 inst
.instruction
|= 0x80;
15814 neon_two_same (0, 1, et
.size
/ 2);
15818 do_neon_qmovun (void)
15820 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15821 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15822 /* Saturating move with unsigned results. Operands must be signed. */
15823 NEON_ENCODE (INTEGER
, inst
);
15824 neon_two_same (0, 1, et
.size
/ 2);
15828 do_neon_rshift_sat_narrow (void)
15830 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15831 or unsigned. If operands are unsigned, results must also be unsigned. */
15832 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15833 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15834 int imm
= inst
.operands
[2].imm
;
15835 /* This gets the bounds check, size encoding and immediate bits calculation
15839 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15840 VQMOVN.I<size> <Dd>, <Qm>. */
15843 inst
.operands
[2].present
= 0;
15844 inst
.instruction
= N_MNEM_vqmovn
;
15849 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15850 _("immediate out of range"));
15851 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15855 do_neon_rshift_sat_narrow_u (void)
15857 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15858 or unsigned. If operands are unsigned, results must also be unsigned. */
15859 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15860 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15861 int imm
= inst
.operands
[2].imm
;
15862 /* This gets the bounds check, size encoding and immediate bits calculation
15866 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15867 VQMOVUN.I<size> <Dd>, <Qm>. */
15870 inst
.operands
[2].present
= 0;
15871 inst
.instruction
= N_MNEM_vqmovun
;
15876 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15877 _("immediate out of range"));
15878 /* FIXME: The manual is kind of unclear about what value U should have in
15879 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15881 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15885 do_neon_movn (void)
15887 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15888 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15889 NEON_ENCODE (INTEGER
, inst
);
15890 neon_two_same (0, 1, et
.size
/ 2);
15894 do_neon_rshift_narrow (void)
15896 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15897 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15898 int imm
= inst
.operands
[2].imm
;
15899 /* This gets the bounds check, size encoding and immediate bits calculation
15903 /* If immediate is zero then we are a pseudo-instruction for
15904 VMOVN.I<size> <Dd>, <Qm> */
15907 inst
.operands
[2].present
= 0;
15908 inst
.instruction
= N_MNEM_vmovn
;
15913 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15914 _("immediate out of range for narrowing operation"));
15915 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15919 do_neon_shll (void)
15921 /* FIXME: Type checking when lengthening. */
15922 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15923 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15924 unsigned imm
= inst
.operands
[2].imm
;
15926 if (imm
== et
.size
)
15928 /* Maximum shift variant. */
15929 NEON_ENCODE (INTEGER
, inst
);
15930 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15931 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15932 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15933 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15934 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15936 neon_dp_fixup (&inst
);
15940 /* A more-specific type check for non-max versions. */
15941 et
= neon_check_type (2, NS_QDI
,
15942 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15943 NEON_ENCODE (IMMED
, inst
);
15944 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
15948 /* Check the various types for the VCVT instruction, and return which version
15949 the current instruction is. */
15951 #define CVT_FLAVOUR_VAR \
15952 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
15953 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
15954 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
15955 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
15956 /* Half-precision conversions. */ \
15957 CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
15958 CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
15959 CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
15960 CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
15961 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
15962 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
15963 /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
15964 Compared with single/double precision variants, only the co-processor \
15965 field is different, so the encoding flow is reused here. */ \
15966 CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
15967 CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
15968 CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
15969 CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
15970 /* VFP instructions. */ \
15971 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
15972 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
15973 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
15974 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
15975 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
15976 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
15977 /* VFP instructions with bitshift. */ \
15978 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
15979 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
15980 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
15981 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
15982 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
15983 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
15984 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
15985 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15987 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
15988 neon_cvt_flavour_##C,
15990 /* The different types of conversions we can do. */
15991 enum neon_cvt_flavour
15994 neon_cvt_flavour_invalid
,
15995 neon_cvt_flavour_first_fp
= neon_cvt_flavour_f32_f64
16000 static enum neon_cvt_flavour
16001 get_neon_cvt_flavour (enum neon_shape rs
)
16003 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
16004 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
16005 if (et.type != NT_invtype) \
16007 inst.error = NULL; \
16008 return (neon_cvt_flavour_##C); \
16011 struct neon_type_el et
;
16012 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
16013 || rs
== NS_FF
) ? N_VFP
: 0;
16014 /* The instruction versions which take an immediate take one register
16015 argument, which is extended to the width of the full register. Thus the
16016 "source" and "destination" registers must have the same width. Hack that
16017 here by making the size equal to the key (wider, in this case) operand. */
16018 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
16022 return neon_cvt_flavour_invalid
;
16037 /* Neon-syntax VFP conversions. */
16040 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
16042 const char *opname
= 0;
16044 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
16045 || rs
== NS_FHI
|| rs
== NS_HFI
)
16047 /* Conversions with immediate bitshift. */
16048 const char *enc
[] =
16050 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
16056 if (flavour
< (int) ARRAY_SIZE (enc
))
16058 opname
= enc
[flavour
];
16059 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
16060 _("operands 0 and 1 must be the same register"));
16061 inst
.operands
[1] = inst
.operands
[2];
16062 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
16067 /* Conversions without bitshift. */
16068 const char *enc
[] =
16070 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
16076 if (flavour
< (int) ARRAY_SIZE (enc
))
16077 opname
= enc
[flavour
];
16081 do_vfp_nsyn_opcode (opname
);
16083 /* ARMv8.2 fp16 VCVT instruction. */
16084 if (flavour
== neon_cvt_flavour_s32_f16
16085 || flavour
== neon_cvt_flavour_u32_f16
16086 || flavour
== neon_cvt_flavour_f16_u32
16087 || flavour
== neon_cvt_flavour_f16_s32
)
16088 do_scalar_fp16_v82_encode ();
16092 do_vfp_nsyn_cvtz (void)
16094 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
16095 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16096 const char *enc
[] =
16098 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
16104 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
16105 do_vfp_nsyn_opcode (enc
[flavour
]);
16109 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
16110 enum neon_cvt_mode mode
)
16115 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16116 D register operands. */
16117 if (flavour
== neon_cvt_flavour_s32_f64
16118 || flavour
== neon_cvt_flavour_u32_f64
)
16119 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16122 if (flavour
== neon_cvt_flavour_s32_f16
16123 || flavour
== neon_cvt_flavour_u32_f16
)
16124 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
16127 set_it_insn_type (OUTSIDE_IT_INSN
);
16131 case neon_cvt_flavour_s32_f64
:
16135 case neon_cvt_flavour_s32_f32
:
16139 case neon_cvt_flavour_s32_f16
:
16143 case neon_cvt_flavour_u32_f64
:
16147 case neon_cvt_flavour_u32_f32
:
16151 case neon_cvt_flavour_u32_f16
:
16156 first_error (_("invalid instruction shape"));
16162 case neon_cvt_mode_a
: rm
= 0; break;
16163 case neon_cvt_mode_n
: rm
= 1; break;
16164 case neon_cvt_mode_p
: rm
= 2; break;
16165 case neon_cvt_mode_m
: rm
= 3; break;
16166 default: first_error (_("invalid rounding mode")); return;
16169 NEON_ENCODE (FPV8
, inst
);
16170 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
16171 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
16172 inst
.instruction
|= sz
<< 8;
16174 /* ARMv8.2 fp16 VCVT instruction. */
16175 if (flavour
== neon_cvt_flavour_s32_f16
16176 ||flavour
== neon_cvt_flavour_u32_f16
)
16177 do_scalar_fp16_v82_encode ();
16178 inst
.instruction
|= op
<< 7;
16179 inst
.instruction
|= rm
<< 16;
16180 inst
.instruction
|= 0xf0000000;
16181 inst
.is_neon
= TRUE
;
16185 do_neon_cvt_1 (enum neon_cvt_mode mode
)
16187 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
16188 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
16189 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
16191 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16193 if (flavour
== neon_cvt_flavour_invalid
)
16196 /* PR11109: Handle round-to-zero for VCVT conversions. */
16197 if (mode
== neon_cvt_mode_z
16198 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
16199 && (flavour
== neon_cvt_flavour_s16_f16
16200 || flavour
== neon_cvt_flavour_u16_f16
16201 || flavour
== neon_cvt_flavour_s32_f32
16202 || flavour
== neon_cvt_flavour_u32_f32
16203 || flavour
== neon_cvt_flavour_s32_f64
16204 || flavour
== neon_cvt_flavour_u32_f64
)
16205 && (rs
== NS_FD
|| rs
== NS_FF
))
16207 do_vfp_nsyn_cvtz ();
16211 /* ARMv8.2 fp16 VCVT conversions. */
16212 if (mode
== neon_cvt_mode_z
16213 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
16214 && (flavour
== neon_cvt_flavour_s32_f16
16215 || flavour
== neon_cvt_flavour_u32_f16
)
16218 do_vfp_nsyn_cvtz ();
16219 do_scalar_fp16_v82_encode ();
16223 /* VFP rather than Neon conversions. */
16224 if (flavour
>= neon_cvt_flavour_first_fp
)
16226 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16227 do_vfp_nsyn_cvt (rs
, flavour
);
16229 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16240 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
16241 0x0000100, 0x1000100, 0x0, 0x1000000};
16243 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16246 /* Fixed-point conversion with #0 immediate is encoded as an
16247 integer conversion. */
16248 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
16250 NEON_ENCODE (IMMED
, inst
);
16251 if (flavour
!= neon_cvt_flavour_invalid
)
16252 inst
.instruction
|= enctab
[flavour
];
16253 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16254 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16255 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16256 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16257 inst
.instruction
|= neon_quad (rs
) << 6;
16258 inst
.instruction
|= 1 << 21;
16259 if (flavour
< neon_cvt_flavour_s16_f16
)
16261 inst
.instruction
|= 1 << 21;
16262 immbits
= 32 - inst
.operands
[2].imm
;
16263 inst
.instruction
|= immbits
<< 16;
16267 inst
.instruction
|= 3 << 20;
16268 immbits
= 16 - inst
.operands
[2].imm
;
16269 inst
.instruction
|= immbits
<< 16;
16270 inst
.instruction
&= ~(1 << 9);
16273 neon_dp_fixup (&inst
);
16279 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
16281 NEON_ENCODE (FLOAT
, inst
);
16282 set_it_insn_type (OUTSIDE_IT_INSN
);
16284 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16287 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16288 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16289 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16290 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16291 inst
.instruction
|= neon_quad (rs
) << 6;
16292 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
16293 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
16294 inst
.instruction
|= mode
<< 8;
16295 if (flavour
== neon_cvt_flavour_u16_f16
16296 || flavour
== neon_cvt_flavour_s16_f16
)
16297 /* Mask off the original size bits and reencode them. */
16298 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
16301 inst
.instruction
|= 0xfc000000;
16303 inst
.instruction
|= 0xf0000000;
16309 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
16310 0x100, 0x180, 0x0, 0x080};
16312 NEON_ENCODE (INTEGER
, inst
);
16314 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16317 if (flavour
!= neon_cvt_flavour_invalid
)
16318 inst
.instruction
|= enctab
[flavour
];
16320 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16321 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16322 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16323 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16324 inst
.instruction
|= neon_quad (rs
) << 6;
16325 if (flavour
>= neon_cvt_flavour_s16_f16
16326 && flavour
<= neon_cvt_flavour_f16_u16
)
16327 /* Half precision. */
16328 inst
.instruction
|= 1 << 18;
16330 inst
.instruction
|= 2 << 18;
16332 neon_dp_fixup (&inst
);
16337 /* Half-precision conversions for Advanced SIMD -- neon. */
16340 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16344 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
16346 as_bad (_("operand size must match register width"));
16351 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
16353 as_bad (_("operand size must match register width"));
16358 inst
.instruction
= 0x3b60600;
16360 inst
.instruction
= 0x3b60700;
16362 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16363 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16364 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16365 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16366 neon_dp_fixup (&inst
);
16370 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
16371 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16372 do_vfp_nsyn_cvt (rs
, flavour
);
16374 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16379 do_neon_cvtr (void)
16381 do_neon_cvt_1 (neon_cvt_mode_x
);
16387 do_neon_cvt_1 (neon_cvt_mode_z
);
16391 do_neon_cvta (void)
16393 do_neon_cvt_1 (neon_cvt_mode_a
);
16397 do_neon_cvtn (void)
16399 do_neon_cvt_1 (neon_cvt_mode_n
);
16403 do_neon_cvtp (void)
16405 do_neon_cvt_1 (neon_cvt_mode_p
);
16409 do_neon_cvtm (void)
16411 do_neon_cvt_1 (neon_cvt_mode_m
);
16415 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
16418 mark_feature_used (&fpu_vfp_ext_armv8
);
16420 encode_arm_vfp_reg (inst
.operands
[0].reg
,
16421 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
16422 encode_arm_vfp_reg (inst
.operands
[1].reg
,
16423 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
16424 inst
.instruction
|= to
? 0x10000 : 0;
16425 inst
.instruction
|= t
? 0x80 : 0;
16426 inst
.instruction
|= is_double
? 0x100 : 0;
16427 do_vfp_cond_or_thumb ();
16431 do_neon_cvttb_1 (bfd_boolean t
)
16433 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
16434 NS_DF
, NS_DH
, NS_NULL
);
16438 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
16441 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
16443 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
16446 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
16448 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
16450 /* The VCVTB and VCVTT instructions with D-register operands
16451 don't work for SP only targets. */
16452 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16456 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
16458 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
16460 /* The VCVTB and VCVTT instructions with D-register operands
16461 don't work for SP only targets. */
16462 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16466 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
16473 do_neon_cvtb (void)
16475 do_neon_cvttb_1 (FALSE
);
16480 do_neon_cvtt (void)
16482 do_neon_cvttb_1 (TRUE
);
16486 neon_move_immediate (void)
16488 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
16489 struct neon_type_el et
= neon_check_type (2, rs
,
16490 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
16491 unsigned immlo
, immhi
= 0, immbits
;
16492 int op
, cmode
, float_p
;
16494 constraint (et
.type
== NT_invtype
,
16495 _("operand size must be specified for immediate VMOV"));
16497 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
16498 op
= (inst
.instruction
& (1 << 5)) != 0;
16500 immlo
= inst
.operands
[1].imm
;
16501 if (inst
.operands
[1].regisimm
)
16502 immhi
= inst
.operands
[1].reg
;
16504 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
16505 _("immediate has bits set outside the operand size"));
16507 float_p
= inst
.operands
[1].immisfloat
;
16509 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
16510 et
.size
, et
.type
)) == FAIL
)
16512 /* Invert relevant bits only. */
16513 neon_invert_size (&immlo
, &immhi
, et
.size
);
16514 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
16515 with one or the other; those cases are caught by
16516 neon_cmode_for_move_imm. */
16518 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
16519 &op
, et
.size
, et
.type
)) == FAIL
)
16521 first_error (_("immediate out of range"));
16526 inst
.instruction
&= ~(1 << 5);
16527 inst
.instruction
|= op
<< 5;
16529 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16530 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16531 inst
.instruction
|= neon_quad (rs
) << 6;
16532 inst
.instruction
|= cmode
<< 8;
16534 neon_write_immbits (immbits
);
16540 if (inst
.operands
[1].isreg
)
16542 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16544 NEON_ENCODE (INTEGER
, inst
);
16545 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16546 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16547 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16548 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16549 inst
.instruction
|= neon_quad (rs
) << 6;
16553 NEON_ENCODE (IMMED
, inst
);
16554 neon_move_immediate ();
16557 neon_dp_fixup (&inst
);
16560 /* Encode instructions of form:
16562 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16563 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16566 neon_mixed_length (struct neon_type_el et
, unsigned size
)
16568 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16569 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16570 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16571 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16572 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16573 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16574 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
16575 inst
.instruction
|= neon_logbits (size
) << 20;
16577 neon_dp_fixup (&inst
);
16581 do_neon_dyadic_long (void)
16583 /* FIXME: Type checking for lengthening op. */
16584 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16585 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16586 neon_mixed_length (et
, et
.size
);
16590 do_neon_abal (void)
16592 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16593 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16594 neon_mixed_length (et
, et
.size
);
16598 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
16600 if (inst
.operands
[2].isscalar
)
16602 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
16603 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
16604 NEON_ENCODE (SCALAR
, inst
);
16605 neon_mul_mac (et
, et
.type
== NT_unsigned
);
16609 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16610 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
16611 NEON_ENCODE (INTEGER
, inst
);
16612 neon_mixed_length (et
, et
.size
);
16617 do_neon_mac_maybe_scalar_long (void)
16619 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
/* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
   internal SCALAR.  QUAD_P is 1 if it's for Q format, otherwise it's 0.
   Reports an error and returns 0 if the scalar register or element index
   is out of range for the chosen format.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      if (regno > 7 || elno > 3)
	goto bad_scalar;

      return ((regno & 0x7)
	      | ((elno & 0x1) << 3)
	      | (((elno >> 1) & 0x1) << 5));
    }
  else
    {
      if (regno > 15 || elno > 1)
	goto bad_scalar;

      return (((regno & 0x1) << 5)
	      | ((regno >> 1) & 0x7)
	      | ((elno & 0x1) << 3));
    }

bad_scalar:
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
16656 do_neon_fmac_maybe_scalar_long (int subtype
)
16658 enum neon_shape rs
;
16660 /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
16661 field (bits[21:20]) has different meaning. For scalar index variant, it's
16662 used to differentiate add and subtract, otherwise it's with fixed value
16666 if (inst
.cond
!= COND_ALWAYS
)
16667 as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
16668 "behaviour is UNPREDICTABLE"));
16670 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16_fml
),
16673 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
16676 /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
16677 be a scalar index register. */
16678 if (inst
.operands
[2].isscalar
)
16680 high8
= 0xfe000000;
16683 rs
= neon_select_shape (NS_DHS
, NS_QDS
, NS_NULL
);
16687 high8
= 0xfc000000;
16690 inst
.instruction
|= (0x1 << 23);
16691 rs
= neon_select_shape (NS_DHH
, NS_QDD
, NS_NULL
);
16694 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_F16
);
16696 /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
16697 the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
16698 so we simply pass -1 as size. */
16699 unsigned quad_p
= (rs
== NS_QDD
|| rs
== NS_QDS
);
16700 neon_three_same (quad_p
, 0, size
);
16702 /* Undo neon_dp_fixup. Redo the high eight bits. */
16703 inst
.instruction
&= 0x00ffffff;
16704 inst
.instruction
|= high8
;
16706 #define LOW1(R) ((R) & 0x1)
16707 #define HI4(R) (((R) >> 1) & 0xf)
16708 /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
16709 whether the instruction is in Q form and whether Vm is a scalar indexed
16711 if (inst
.operands
[2].isscalar
)
16714 = neon_scalar_for_fmac_fp16_long (inst
.operands
[2].reg
, quad_p
);
16715 inst
.instruction
&= 0xffffffd0;
16716 inst
.instruction
|= rm
;
16720 /* Redo Rn as well. */
16721 inst
.instruction
&= 0xfff0ff7f;
16722 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
16723 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
16728 /* Redo Rn and Rm. */
16729 inst
.instruction
&= 0xfff0ff50;
16730 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
16731 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
16732 inst
.instruction
|= HI4 (inst
.operands
[2].reg
);
16733 inst
.instruction
|= LOW1 (inst
.operands
[2].reg
) << 5;
/* VFMAL: FP16 widening fused multiply-add (subtype 0).  */

static void
do_neon_vfmal (void)
{
  return do_neon_fmac_maybe_scalar_long (0);
}
/* VFMSL: FP16 widening fused multiply-subtract (subtype 1).  */

static void
do_neon_vfmsl (void)
{
  return do_neon_fmac_maybe_scalar_long (1);
}
16750 do_neon_dyadic_wide (void)
16752 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
16753 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16754 neon_mixed_length (et
, et
.size
);
16758 do_neon_dyadic_narrow (void)
16760 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16761 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
16762 /* Operand sign is unimportant, and the U bit is part of the opcode,
16763 so force the operand type to integer. */
16764 et
.type
= NT_integer
;
16765 neon_mixed_length (et
, et
.size
/ 2);
16769 do_neon_mul_sat_scalar_long (void)
16771 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
16775 do_neon_vmull (void)
16777 if (inst
.operands
[2].isscalar
)
16778 do_neon_mac_maybe_scalar_long ();
16781 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16782 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
16784 if (et
.type
== NT_poly
)
16785 NEON_ENCODE (POLY
, inst
);
16787 NEON_ENCODE (INTEGER
, inst
);
16789 /* For polynomial encoding the U bit must be zero, and the size must
16790 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16791 obviously, as 0b10). */
16794 /* Check we're on the correct architecture. */
16795 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
16797 _("Instruction form not available on this architecture.");
16802 neon_mixed_length (et
, et
.size
);
16809 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
16810 struct neon_type_el et
= neon_check_type (3, rs
,
16811 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16812 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
16814 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
16815 _("shift out of range"));
16816 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16817 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16818 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16819 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16820 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16821 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16822 inst
.instruction
|= neon_quad (rs
) << 6;
16823 inst
.instruction
|= imm
<< 8;
16825 neon_dp_fixup (&inst
);
16831 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16832 struct neon_type_el et
= neon_check_type (2, rs
,
16833 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16834 unsigned op
= (inst
.instruction
>> 7) & 3;
16835 /* N (width of reversed regions) is encoded as part of the bitmask. We
16836 extract it here to check the elements to be reversed are smaller.
16837 Otherwise we'd get a reserved instruction. */
16838 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
16839 gas_assert (elsize
!= 0);
16840 constraint (et
.size
>= elsize
,
16841 _("elements must be smaller than reversal region"));
16842 neon_two_same (neon_quad (rs
), 1, et
.size
);
16848 if (inst
.operands
[1].isscalar
)
16850 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
16851 struct neon_type_el et
= neon_check_type (2, rs
,
16852 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16853 unsigned sizebits
= et
.size
>> 3;
16854 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16855 int logsize
= neon_logbits (et
.size
);
16856 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
16858 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
16861 NEON_ENCODE (SCALAR
, inst
);
16862 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16863 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16864 inst
.instruction
|= LOW4 (dm
);
16865 inst
.instruction
|= HI1 (dm
) << 5;
16866 inst
.instruction
|= neon_quad (rs
) << 6;
16867 inst
.instruction
|= x
<< 17;
16868 inst
.instruction
|= sizebits
<< 16;
16870 neon_dp_fixup (&inst
);
16874 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
16875 struct neon_type_el et
= neon_check_type (2, rs
,
16876 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16877 /* Duplicate ARM register to lanes of vector. */
16878 NEON_ENCODE (ARMREG
, inst
);
16881 case 8: inst
.instruction
|= 0x400000; break;
16882 case 16: inst
.instruction
|= 0x000020; break;
16883 case 32: inst
.instruction
|= 0x000000; break;
16886 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16887 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
16888 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
16889 inst
.instruction
|= neon_quad (rs
) << 21;
16890 /* The encoding for this instruction is identical for the ARM and Thumb
16891 variants, except for the condition field. */
16892 do_vfp_cond_or_thumb ();
16896 /* VMOV has particularly many variations. It can be one of:
16897 0. VMOV<c><q> <Qd>, <Qm>
16898 1. VMOV<c><q> <Dd>, <Dm>
16899 (Register operations, which are VORR with Rm = Rn.)
16900 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16901 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16903 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16904 (ARM register to scalar.)
16905 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16906 (Two ARM registers to vector.)
16907 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16908 (Scalar to ARM register.)
16909 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16910 (Vector to two ARM registers.)
16911 8. VMOV.F32 <Sd>, <Sm>
16912 9. VMOV.F64 <Dd>, <Dm>
16913 (VFP register moves.)
16914 10. VMOV.F32 <Sd>, #imm
16915 11. VMOV.F64 <Dd>, #imm
16916 (VFP float immediate load.)
16917 12. VMOV <Rd>, <Sm>
16918 (VFP single to ARM reg.)
16919 13. VMOV <Sd>, <Rm>
16920 (ARM reg to VFP single.)
16921 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16922 (Two ARM regs to two VFP singles.)
16923 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16924 (Two VFP singles to two ARM regs.)
16926 These cases can be disambiguated using neon_select_shape, except cases 1/9
16927 and 3/11 which depend on the operand type too.
16929 All the encoded bits are hardcoded by this function.
16931 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16932 Cases 5, 7 may be used with VFPv2 and above.
16934 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16935 can specify a type where it doesn't make sense to, and is ignored). */
16940 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
16941 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
16942 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
16943 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
16944 struct neon_type_el et
;
16945 const char *ldconst
= 0;
16949 case NS_DD
: /* case 1/9. */
16950 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16951 /* It is not an error here if no type is given. */
16953 if (et
.type
== NT_float
&& et
.size
== 64)
16955 do_vfp_nsyn_opcode ("fcpyd");
16958 /* fall through. */
16960 case NS_QQ
: /* case 0/1. */
16962 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16964 /* The architecture manual I have doesn't explicitly state which
16965 value the U bit should have for register->register moves, but
16966 the equivalent VORR instruction has U = 0, so do that. */
16967 inst
.instruction
= 0x0200110;
16968 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16969 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16970 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16971 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16972 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16973 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16974 inst
.instruction
|= neon_quad (rs
) << 6;
16976 neon_dp_fixup (&inst
);
16980 case NS_DI
: /* case 3/11. */
16981 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16983 if (et
.type
== NT_float
&& et
.size
== 64)
16985 /* case 11 (fconstd). */
16986 ldconst
= "fconstd";
16987 goto encode_fconstd
;
16989 /* fall through. */
16991 case NS_QI
: /* case 2/3. */
16992 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16994 inst
.instruction
= 0x0800010;
16995 neon_move_immediate ();
16996 neon_dp_fixup (&inst
);
16999 case NS_SR
: /* case 4. */
17001 unsigned bcdebits
= 0;
17003 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
17004 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
17006 /* .<size> is optional here, defaulting to .32. */
17007 if (inst
.vectype
.elems
== 0
17008 && inst
.operands
[0].vectype
.type
== NT_invtype
17009 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17011 inst
.vectype
.el
[0].type
= NT_untyped
;
17012 inst
.vectype
.el
[0].size
= 32;
17013 inst
.vectype
.elems
= 1;
17016 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17017 logsize
= neon_logbits (et
.size
);
17019 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17021 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17022 && et
.size
!= 32, _(BAD_FPU
));
17023 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17024 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17028 case 8: bcdebits
= 0x8; break;
17029 case 16: bcdebits
= 0x1; break;
17030 case 32: bcdebits
= 0x0; break;
17034 bcdebits
|= x
<< logsize
;
17036 inst
.instruction
= 0xe000b10;
17037 do_vfp_cond_or_thumb ();
17038 inst
.instruction
|= LOW4 (dn
) << 16;
17039 inst
.instruction
|= HI1 (dn
) << 7;
17040 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17041 inst
.instruction
|= (bcdebits
& 3) << 5;
17042 inst
.instruction
|= (bcdebits
>> 2) << 21;
17046 case NS_DRR
: /* case 5 (fmdrr). */
17047 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17050 inst
.instruction
= 0xc400b10;
17051 do_vfp_cond_or_thumb ();
17052 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
17053 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
17054 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17055 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
17058 case NS_RS
: /* case 6. */
17061 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17062 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
17063 unsigned abcdebits
= 0;
17065 /* .<dt> is optional here, defaulting to .32. */
17066 if (inst
.vectype
.elems
== 0
17067 && inst
.operands
[0].vectype
.type
== NT_invtype
17068 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17070 inst
.vectype
.el
[0].type
= NT_untyped
;
17071 inst
.vectype
.el
[0].size
= 32;
17072 inst
.vectype
.elems
= 1;
17075 et
= neon_check_type (2, NS_NULL
,
17076 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
17077 logsize
= neon_logbits (et
.size
);
17079 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17081 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17082 && et
.size
!= 32, _(BAD_FPU
));
17083 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17084 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17088 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
17089 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
17090 case 32: abcdebits
= 0x00; break;
17094 abcdebits
|= x
<< logsize
;
17095 inst
.instruction
= 0xe100b10;
17096 do_vfp_cond_or_thumb ();
17097 inst
.instruction
|= LOW4 (dn
) << 16;
17098 inst
.instruction
|= HI1 (dn
) << 7;
17099 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17100 inst
.instruction
|= (abcdebits
& 3) << 5;
17101 inst
.instruction
|= (abcdebits
>> 2) << 21;
17105 case NS_RRD
: /* case 7 (fmrrd). */
17106 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17109 inst
.instruction
= 0xc500b10;
17110 do_vfp_cond_or_thumb ();
17111 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17112 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17113 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17114 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17117 case NS_FF
: /* case 8 (fcpys). */
17118 do_vfp_nsyn_opcode ("fcpys");
17122 case NS_FI
: /* case 10 (fconsts). */
17123 ldconst
= "fconsts";
17125 if (!inst
.operands
[1].immisfloat
)
17128 /* Immediate has to fit in 8 bits so float is enough. */
17129 float imm
= (float) inst
.operands
[1].imm
;
17130 memcpy (&new_imm
, &imm
, sizeof (float));
17131 /* But the assembly may have been written to provide an integer
17132 bit pattern that equates to a float, so check that the
17133 conversion has worked. */
17134 if (is_quarter_float (new_imm
))
17136 if (is_quarter_float (inst
.operands
[1].imm
))
17137 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
17139 inst
.operands
[1].imm
= new_imm
;
17140 inst
.operands
[1].immisfloat
= 1;
17144 if (is_quarter_float (inst
.operands
[1].imm
))
17146 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
17147 do_vfp_nsyn_opcode (ldconst
);
17149 /* ARMv8.2 fp16 vmov.f16 instruction. */
17151 do_scalar_fp16_v82_encode ();
17154 first_error (_("immediate out of range"));
17158 case NS_RF
: /* case 12 (fmrs). */
17159 do_vfp_nsyn_opcode ("fmrs");
17160 /* ARMv8.2 fp16 vmov.f16 instruction. */
17162 do_scalar_fp16_v82_encode ();
17166 case NS_FR
: /* case 13 (fmsr). */
17167 do_vfp_nsyn_opcode ("fmsr");
17168 /* ARMv8.2 fp16 vmov.f16 instruction. */
17170 do_scalar_fp16_v82_encode ();
17173 /* The encoders for the fmrrs and fmsrr instructions expect three operands
17174 (one of which is a list), but we have parsed four. Do some fiddling to
17175 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
17177 case NS_RRFF
: /* case 14 (fmrrs). */
17178 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
17179 _("VFP registers must be adjacent"));
17180 inst
.operands
[2].imm
= 2;
17181 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17182 do_vfp_nsyn_opcode ("fmrrs");
17185 case NS_FFRR
: /* case 15 (fmsrr). */
17186 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
17187 _("VFP registers must be adjacent"));
17188 inst
.operands
[1] = inst
.operands
[2];
17189 inst
.operands
[2] = inst
.operands
[3];
17190 inst
.operands
[0].imm
= 2;
17191 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17192 do_vfp_nsyn_opcode ("fmsrr");
17196 /* neon_select_shape has determined that the instruction
17197 shape is wrong and has already set the error message. */
17206 do_neon_rshift_round_imm (void)
17208 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
17209 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
17210 int imm
= inst
.operands
[2].imm
;
17212 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
17215 inst
.operands
[2].present
= 0;
17220 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
17221 _("immediate out of range for shift"));
17222 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
17227 do_neon_movhf (void)
17229 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
17230 constraint (rs
!= NS_HH
, _("invalid suffix"));
17232 if (inst
.cond
!= COND_ALWAYS
)
17236 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
17237 " the behaviour is UNPREDICTABLE"));
17241 inst
.error
= BAD_COND
;
17246 do_vfp_sp_monadic ();
17249 inst
.instruction
|= 0xf0000000;
17253 do_neon_movl (void)
17255 struct neon_type_el et
= neon_check_type (2, NS_QD
,
17256 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17257 unsigned sizebits
= et
.size
>> 3;
17258 inst
.instruction
|= sizebits
<< 19;
17259 neon_two_same (0, et
.type
== NT_unsigned
, -1);
17265 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17266 struct neon_type_el et
= neon_check_type (2, rs
,
17267 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17268 NEON_ENCODE (INTEGER
, inst
);
17269 neon_two_same (neon_quad (rs
), 1, et
.size
);
17273 do_neon_zip_uzp (void)
17275 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17276 struct neon_type_el et
= neon_check_type (2, rs
,
17277 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17278 if (rs
== NS_DD
&& et
.size
== 32)
17280 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
17281 inst
.instruction
= N_MNEM_vtrn
;
17285 neon_two_same (neon_quad (rs
), 1, et
.size
);
17289 do_neon_sat_abs_neg (void)
17291 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17292 struct neon_type_el et
= neon_check_type (2, rs
,
17293 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17294 neon_two_same (neon_quad (rs
), 1, et
.size
);
17298 do_neon_pair_long (void)
17300 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17301 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
17302 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
17303 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
17304 neon_two_same (neon_quad (rs
), 1, et
.size
);
17308 do_neon_recip_est (void)
17310 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17311 struct neon_type_el et
= neon_check_type (2, rs
,
17312 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
17313 inst
.instruction
|= (et
.type
== NT_float
) << 8;
17314 neon_two_same (neon_quad (rs
), 1, et
.size
);
17320 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17321 struct neon_type_el et
= neon_check_type (2, rs
,
17322 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17323 neon_two_same (neon_quad (rs
), 1, et
.size
);
17329 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17330 struct neon_type_el et
= neon_check_type (2, rs
,
17331 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
17332 neon_two_same (neon_quad (rs
), 1, et
.size
);
17338 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17339 struct neon_type_el et
= neon_check_type (2, rs
,
17340 N_EQK
| N_INT
, N_8
| N_KEY
);
17341 neon_two_same (neon_quad (rs
), 1, et
.size
);
17347 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17348 neon_two_same (neon_quad (rs
), 1, -1);
17352 do_neon_tbl_tbx (void)
17354 unsigned listlenbits
;
17355 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
17357 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
17359 first_error (_("bad list length for table lookup"));
17363 listlenbits
= inst
.operands
[1].imm
- 1;
17364 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17365 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17366 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17367 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17368 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17369 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17370 inst
.instruction
|= listlenbits
<< 8;
17372 neon_dp_fixup (&inst
);
17376 do_neon_ldm_stm (void)
17378 /* P, U and L bits are part of bitmask. */
17379 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
17380 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
17382 if (inst
.operands
[1].issingle
)
17384 do_vfp_nsyn_ldm_stm (is_dbmode
);
17388 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
17389 _("writeback (!) must be used for VLDMDB and VSTMDB"));
17391 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
17392 _("register list must contain at least 1 and at most 16 "
17395 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
17396 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
17397 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
17398 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
17400 inst
.instruction
|= offsetbits
;
17402 do_vfp_cond_or_thumb ();
17406 do_neon_ldr_str (void)
17408 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
17410 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
17411 And is UNPREDICTABLE in thumb mode. */
17413 && inst
.operands
[1].reg
== REG_PC
17414 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
17417 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17418 else if (warn_on_deprecated
)
17419 as_tsktsk (_("Use of PC here is deprecated"));
17422 if (inst
.operands
[0].issingle
)
17425 do_vfp_nsyn_opcode ("flds");
17427 do_vfp_nsyn_opcode ("fsts");
17429 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17430 if (inst
.vectype
.el
[0].size
== 16)
17431 do_scalar_fp16_v82_encode ();
17436 do_vfp_nsyn_opcode ("fldd");
17438 do_vfp_nsyn_opcode ("fstd");
17443 do_t_vldr_vstr_sysreg (void)
17445 int fp_vldr_bitno
= 20, sysreg_vldr_bitno
= 20;
17446 bfd_boolean is_vldr
= ((inst
.instruction
& (1 << fp_vldr_bitno
)) != 0);
17448 /* Use of PC is UNPREDICTABLE. */
17449 if (inst
.operands
[1].reg
== REG_PC
)
17450 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17452 if (inst
.operands
[1].immisreg
)
17453 inst
.error
= _("instruction does not accept register index");
17455 if (!inst
.operands
[1].isreg
)
17456 inst
.error
= _("instruction does not accept PC-relative addressing");
17458 if (abs (inst
.operands
[1].imm
) >= (1 << 7))
17459 inst
.error
= _("immediate value out of range");
17461 inst
.instruction
= 0xec000f80;
17463 inst
.instruction
|= 1 << sysreg_vldr_bitno
;
17464 encode_arm_cp_address (1, TRUE
, FALSE
, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
);
17465 inst
.instruction
|= (inst
.operands
[0].imm
& 0x7) << 13;
17466 inst
.instruction
|= (inst
.operands
[0].imm
& 0x8) << 19;
17470 do_vldr_vstr (void)
17472 bfd_boolean sysreg_op
= !inst
.operands
[0].isreg
;
17474 /* VLDR/VSTR (System Register). */
17477 if (!mark_feature_used (&arm_ext_v8_1m_main
))
17478 as_bad (_("Instruction not permitted on this architecture"));
17480 do_t_vldr_vstr_sysreg ();
17485 if (!mark_feature_used (&fpu_vfp_ext_v1xd
))
17486 as_bad (_("Instruction not permitted on this architecture"));
17487 do_neon_ldr_str ();
17491 /* "interleave" version also handles non-interleaving register VLD1/VST1
17495 do_neon_ld_st_interleave (void)
17497 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
17498 N_8
| N_16
| N_32
| N_64
);
17499 unsigned alignbits
= 0;
17501 /* The bits in this table go:
17502 0: register stride of one (0) or two (1)
17503 1,2: register list length, minus one (1, 2, 3, 4).
17504 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
17505 We use -1 for invalid entries. */
17506 const int typetable
[] =
17508 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
17509 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
17510 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
17511 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
17515 if (et
.type
== NT_invtype
)
17518 if (inst
.operands
[1].immisalign
)
17519 switch (inst
.operands
[1].imm
>> 8)
17521 case 64: alignbits
= 1; break;
17523 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
17524 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17525 goto bad_alignment
;
17529 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17530 goto bad_alignment
;
17535 first_error (_("bad alignment"));
17539 inst
.instruction
|= alignbits
<< 4;
17540 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17542 /* Bits [4:6] of the immediate in a list specifier encode register stride
17543 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
17544 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
17545 up the right value for "type" in a table based on this value and the given
17546 list style, then stick it back. */
17547 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
17548 | (((inst
.instruction
>> 8) & 3) << 3);
17550 typebits
= typetable
[idx
];
17552 constraint (typebits
== -1, _("bad list type for instruction"));
17553 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
17554 _("bad element type for instruction"));
17556 inst
.instruction
&= ~0xf00;
17557 inst
.instruction
|= typebits
<< 8;
17560 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17561 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17562 otherwise. The variable arguments are a list of pairs of legal (size, align)
17563 values, terminated with -1. */
17566 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
17569 int result
= FAIL
, thissize
, thisalign
;
17571 if (!inst
.operands
[1].immisalign
)
17577 va_start (ap
, do_alignment
);
17581 thissize
= va_arg (ap
, int);
17582 if (thissize
== -1)
17584 thisalign
= va_arg (ap
, int);
17586 if (size
== thissize
&& align
== thisalign
)
17589 while (result
!= SUCCESS
);
17593 if (result
== SUCCESS
)
17596 first_error (_("unsupported alignment for instruction"));
17602 do_neon_ld_st_lane (void)
17604 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
17605 int align_good
, do_alignment
= 0;
17606 int logsize
= neon_logbits (et
.size
);
17607 int align
= inst
.operands
[1].imm
>> 8;
17608 int n
= (inst
.instruction
>> 8) & 3;
17609 int max_el
= 64 / et
.size
;
17611 if (et
.type
== NT_invtype
)
17614 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
17615 _("bad list length"));
17616 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
17617 _("scalar index out of range"));
17618 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
17620 _("stride of 2 unavailable when element size is 8"));
17624 case 0: /* VLD1 / VST1. */
17625 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
17627 if (align_good
== FAIL
)
17631 unsigned alignbits
= 0;
17634 case 16: alignbits
= 0x1; break;
17635 case 32: alignbits
= 0x3; break;
17638 inst
.instruction
|= alignbits
<< 4;
17642 case 1: /* VLD2 / VST2. */
17643 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
17644 16, 32, 32, 64, -1);
17645 if (align_good
== FAIL
)
17648 inst
.instruction
|= 1 << 4;
17651 case 2: /* VLD3 / VST3. */
17652 constraint (inst
.operands
[1].immisalign
,
17653 _("can't use alignment with this instruction"));
17656 case 3: /* VLD4 / VST4. */
17657 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
17658 16, 64, 32, 64, 32, 128, -1);
17659 if (align_good
== FAIL
)
17663 unsigned alignbits
= 0;
17666 case 8: alignbits
= 0x1; break;
17667 case 16: alignbits
= 0x1; break;
17668 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
17671 inst
.instruction
|= alignbits
<< 4;
17678 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
17679 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17680 inst
.instruction
|= 1 << (4 + logsize
);
17682 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
17683 inst
.instruction
|= logsize
<< 10;
17686 /* Encode single n-element structure to all lanes VLD<n> instructions. */
17689 do_neon_ld_dup (void)
17691 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
17692 int align_good
, do_alignment
= 0;
17694 if (et
.type
== NT_invtype
)
17697 switch ((inst
.instruction
>> 8) & 3)
17699 case 0: /* VLD1. */
17700 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
17701 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
17702 &do_alignment
, 16, 16, 32, 32, -1);
17703 if (align_good
== FAIL
)
17705 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
17708 case 2: inst
.instruction
|= 1 << 5; break;
17709 default: first_error (_("bad list length")); return;
17711 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17714 case 1: /* VLD2. */
17715 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
17716 &do_alignment
, 8, 16, 16, 32, 32, 64,
17718 if (align_good
== FAIL
)
17720 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
17721 _("bad list length"));
17722 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17723 inst
.instruction
|= 1 << 5;
17724 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17727 case 2: /* VLD3. */
17728 constraint (inst
.operands
[1].immisalign
,
17729 _("can't use alignment with this instruction"));
17730 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
17731 _("bad list length"));
17732 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17733 inst
.instruction
|= 1 << 5;
17734 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17737 case 3: /* VLD4. */
17739 int align
= inst
.operands
[1].imm
>> 8;
17740 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
17741 16, 64, 32, 64, 32, 128, -1);
17742 if (align_good
== FAIL
)
17744 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
17745 _("bad list length"));
17746 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17747 inst
.instruction
|= 1 << 5;
17748 if (et
.size
== 32 && align
== 128)
17749 inst
.instruction
|= 0x3 << 6;
17751 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17758 inst
.instruction
|= do_alignment
<< 4;
17761 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17762 apart from bits [11:4]. */
17765 do_neon_ldx_stx (void)
17767 if (inst
.operands
[1].isreg
)
17768 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
17770 switch (NEON_LANE (inst
.operands
[0].imm
))
17772 case NEON_INTERLEAVE_LANES
:
17773 NEON_ENCODE (INTERLV
, inst
);
17774 do_neon_ld_st_interleave ();
17777 case NEON_ALL_LANES
:
17778 NEON_ENCODE (DUP
, inst
);
17779 if (inst
.instruction
== N_INV
)
17781 first_error ("only loads support such operands");
17788 NEON_ENCODE (LANE
, inst
);
17789 do_neon_ld_st_lane ();
17792 /* L bit comes from bit mask. */
17793 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17794 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17795 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17797 if (inst
.operands
[1].postind
)
17799 int postreg
= inst
.operands
[1].imm
& 0xf;
17800 constraint (!inst
.operands
[1].immisreg
,
17801 _("post-index must be a register"));
17802 constraint (postreg
== 0xd || postreg
== 0xf,
17803 _("bad register for post-index"));
17804 inst
.instruction
|= postreg
;
17808 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
17809 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
17810 || inst
.relocs
[0].exp
.X_add_number
!= 0,
17813 if (inst
.operands
[1].writeback
)
17815 inst
.instruction
|= 0xd;
17818 inst
.instruction
|= 0xf;
17822 inst
.instruction
|= 0xf9000000;
17824 inst
.instruction
|= 0xf4000000;
17829 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
17831 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17832 D register operands. */
17833 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17834 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17837 NEON_ENCODE (FPV8
, inst
);
17839 if (rs
== NS_FFF
|| rs
== NS_HHH
)
17841 do_vfp_sp_dyadic ();
17843 /* ARMv8.2 fp16 instruction. */
17845 do_scalar_fp16_v82_encode ();
17848 do_vfp_dp_rd_rn_rm ();
17851 inst
.instruction
|= 0x100;
17853 inst
.instruction
|= 0xf0000000;
17859 set_it_insn_type (OUTSIDE_IT_INSN
);
17861 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
17862 first_error (_("invalid instruction shape"));
17868 set_it_insn_type (OUTSIDE_IT_INSN
);
17870 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
17873 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17876 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
17880 do_vrint_1 (enum neon_cvt_mode mode
)
17882 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
17883 struct neon_type_el et
;
17888 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17889 D register operands. */
17890 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17891 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17894 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
17896 if (et
.type
!= NT_invtype
)
17898 /* VFP encodings. */
17899 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
17900 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
17901 set_it_insn_type (OUTSIDE_IT_INSN
);
17903 NEON_ENCODE (FPV8
, inst
);
17904 if (rs
== NS_FF
|| rs
== NS_HH
)
17905 do_vfp_sp_monadic ();
17907 do_vfp_dp_rd_rm ();
17911 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
17912 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
17913 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
17914 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
17915 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
17916 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
17917 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
17921 inst
.instruction
|= (rs
== NS_DD
) << 8;
17922 do_vfp_cond_or_thumb ();
17924 /* ARMv8.2 fp16 vrint instruction. */
17926 do_scalar_fp16_v82_encode ();
17930 /* Neon encodings (or something broken...). */
17932 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
17934 if (et
.type
== NT_invtype
)
17937 set_it_insn_type (OUTSIDE_IT_INSN
);
17938 NEON_ENCODE (FLOAT
, inst
);
17940 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17943 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17944 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17945 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17946 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17947 inst
.instruction
|= neon_quad (rs
) << 6;
17948 /* Mask off the original size bits and reencode them. */
17949 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
17950 | neon_logbits (et
.size
) << 18);
17954 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
17955 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
17956 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
17957 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
17958 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
17959 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
17960 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
17965 inst
.instruction
|= 0xfc000000;
17967 inst
.instruction
|= 0xf0000000;
17974 do_vrint_1 (neon_cvt_mode_x
);
17980 do_vrint_1 (neon_cvt_mode_z
);
17986 do_vrint_1 (neon_cvt_mode_r
);
17992 do_vrint_1 (neon_cvt_mode_a
);
17998 do_vrint_1 (neon_cvt_mode_n
);
18004 do_vrint_1 (neon_cvt_mode_p
);
18010 do_vrint_1 (neon_cvt_mode_m
);
/* Return the encoded scalar (register | index bits) for a VCMLA scalar
   operand of element size ELSIZE, or report an error if the register
   number / lane index combination is out of range.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16 && elno < 2 && regno < 16)
    return regno | (elno << 4);
  else if (elsize == 32 && elno == 0)
    return regno;

  first_error (_("scalar out of range"));
  return 0;
}
18031 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18033 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18034 _("expression too complex"));
18035 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18036 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
18037 _("immediate out of range"));
18039 if (inst
.operands
[2].isscalar
)
18041 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
18042 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18043 N_KEY
| N_F16
| N_F32
).size
;
18044 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
18046 inst
.instruction
= 0xfe000800;
18047 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18048 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18049 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
18050 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
18051 inst
.instruction
|= LOW4 (m
);
18052 inst
.instruction
|= HI1 (m
) << 5;
18053 inst
.instruction
|= neon_quad (rs
) << 6;
18054 inst
.instruction
|= rot
<< 20;
18055 inst
.instruction
|= (size
== 32) << 23;
18059 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18060 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18061 N_KEY
| N_F16
| N_F32
).size
;
18062 neon_three_same (neon_quad (rs
), 0, -1);
18063 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18064 inst
.instruction
|= 0xfc200800;
18065 inst
.instruction
|= rot
<< 23;
18066 inst
.instruction
|= (size
== 32) << 20;
18073 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18075 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18076 _("expression too complex"));
18077 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18078 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
18079 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18080 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18081 N_KEY
| N_F16
| N_F32
).size
;
18082 neon_three_same (neon_quad (rs
), 0, -1);
18083 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18084 inst
.instruction
|= 0xfc800800;
18085 inst
.instruction
|= (rot
== 270) << 24;
18086 inst
.instruction
|= (size
== 32) << 20;
18089 /* Dot Product instructions encoding support. */
18092 do_neon_dotproduct (int unsigned_p
)
18094 enum neon_shape rs
;
18095 unsigned scalar_oprd2
= 0;
18098 if (inst
.cond
!= COND_ALWAYS
)
18099 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
18100 "is UNPREDICTABLE"));
18102 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18105 /* Dot Product instructions are in three-same D/Q register format or the third
18106 operand can be a scalar index register. */
18107 if (inst
.operands
[2].isscalar
)
18109 scalar_oprd2
= neon_scalar_for_mul (inst
.operands
[2].reg
, 32);
18110 high8
= 0xfe000000;
18111 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
18115 high8
= 0xfc000000;
18116 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
18120 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_U8
);
18122 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_S8
);
18124 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
18125 Product instruction, so we pass 0 as the "ubit" parameter. And the
18126 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
18127 neon_three_same (neon_quad (rs
), 0, 32);
18129 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
18130 different NEON three-same encoding. */
18131 inst
.instruction
&= 0x00ffffff;
18132 inst
.instruction
|= high8
;
18133 /* Encode 'U' bit which indicates signedness. */
18134 inst
.instruction
|= (unsigned_p
? 1 : 0) << 4;
18135 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
18136 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
18137 the instruction encoding. */
18138 if (inst
.operands
[2].isscalar
)
18140 inst
.instruction
&= 0xffffffd0;
18141 inst
.instruction
|= LOW4 (scalar_oprd2
);
18142 inst
.instruction
|= HI1 (scalar_oprd2
) << 5;
/* Dot Product instructions for signed integer.  */
static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}
/* Dot Product instructions for unsigned integer.  */
static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
18162 /* Crypto v1 instructions. */
18164 do_crypto_2op_1 (unsigned elttype
, int op
)
18166 set_it_insn_type (OUTSIDE_IT_INSN
);
18168 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
18174 NEON_ENCODE (INTEGER
, inst
);
18175 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18176 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18177 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18178 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18180 inst
.instruction
|= op
<< 6;
18183 inst
.instruction
|= 0xfc000000;
18185 inst
.instruction
|= 0xf0000000;
18189 do_crypto_3op_1 (int u
, int op
)
18191 set_it_insn_type (OUTSIDE_IT_INSN
);
18193 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
18194 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
18199 NEON_ENCODE (INTEGER
, inst
);
18200 neon_three_same (1, u
, 8 << op
);
18206 do_crypto_2op_1 (N_8
, 0);
18212 do_crypto_2op_1 (N_8
, 1);
18218 do_crypto_2op_1 (N_8
, 2);
18224 do_crypto_2op_1 (N_8
, 3);
18230 do_crypto_3op_1 (0, 0);
18236 do_crypto_3op_1 (0, 1);
18242 do_crypto_3op_1 (0, 2);
18248 do_crypto_3op_1 (0, 3);
18254 do_crypto_3op_1 (1, 0);
18260 do_crypto_3op_1 (1, 1);
/* Encode SHA256SU1.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
18272 do_crypto_2op_1 (N_32
, -1);
18278 do_crypto_2op_1 (N_32
, 0);
18282 do_sha256su0 (void)
18284 do_crypto_2op_1 (N_32
, 1);
18288 do_crc32_1 (unsigned int poly
, unsigned int sz
)
18290 unsigned int Rd
= inst
.operands
[0].reg
;
18291 unsigned int Rn
= inst
.operands
[1].reg
;
18292 unsigned int Rm
= inst
.operands
[2].reg
;
18294 set_it_insn_type (OUTSIDE_IT_INSN
);
18295 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
18296 inst
.instruction
|= LOW4 (Rn
) << 16;
18297 inst
.instruction
|= LOW4 (Rm
);
18298 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
18299 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
18301 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
18302 as_warn (UNPRED_REG ("r15"));
18344 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18346 neon_check_type (2, NS_FD
, N_S32
, N_F64
);
18347 do_vfp_sp_dp_cvt ();
18348 do_vfp_cond_or_thumb ();
18352 /* Overall per-instruction processing. */
18354 /* We need to be able to fix up arbitrary expressions in some statements.
18355 This is so that we can handle symbols that are an arbitrary distance from
18356 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18357 which returns part of an address in a form which will be valid for
18358 a data instruction. We do this by pushing the expression into a symbol
18359 in the expr_section, and creating a fix for that. */
18362 fix_new_arm (fragS
* frag
,
18376 /* Create an absolute valued symbol, so we have something to
18377 refer to in the object file. Unfortunately for us, gas's
18378 generic expression parsing will already have folded out
18379 any use of .set foo/.type foo %function that may have
18380 been used to set type information of the target location,
18381 that's being specified symbolically. We have to presume
18382 the user knows what they are doing. */
18386 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
18388 symbol
= symbol_find_or_make (name
);
18389 S_SET_SEGMENT (symbol
, absolute_section
);
18390 symbol_set_frag (symbol
, &zero_address_frag
);
18391 S_SET_VALUE (symbol
, exp
->X_add_number
);
18392 exp
->X_op
= O_symbol
;
18393 exp
->X_add_symbol
= symbol
;
18394 exp
->X_add_number
= 0;
18400 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
18401 (enum bfd_reloc_code_real
) reloc
);
18405 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
18406 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
18410 /* Mark whether the fix is to a THUMB instruction, or an ARM
18412 new_fix
->tc_fix_data
= thumb_mode
;
18415 /* Create a frg for an instruction requiring relaxation. */
18417 output_relax_insn (void)
18423 /* The size of the instruction is unknown, so tie the debug info to the
18424 start of the instruction. */
18425 dwarf2_emit_insn (0);
18427 switch (inst
.relocs
[0].exp
.X_op
)
18430 sym
= inst
.relocs
[0].exp
.X_add_symbol
;
18431 offset
= inst
.relocs
[0].exp
.X_add_number
;
18435 offset
= inst
.relocs
[0].exp
.X_add_number
;
18438 sym
= make_expr_symbol (&inst
.relocs
[0].exp
);
18442 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
18443 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
18444 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
18447 /* Write a 32-bit thumb instruction to buf. */
18449 put_thumb32_insn (char * buf
, unsigned long insn
)
18451 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
18452 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
18456 output_inst (const char * str
)
18462 as_bad ("%s -- `%s'", inst
.error
, str
);
18467 output_relax_insn ();
18470 if (inst
.size
== 0)
18473 to
= frag_more (inst
.size
);
18474 /* PR 9814: Record the thumb mode into the current frag so that we know
18475 what type of NOP padding to use, if necessary. We override any previous
18476 setting so that if the mode has changed then the NOPS that we use will
18477 match the encoding of the last instruction in the frag. */
18478 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
18480 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
18482 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
18483 put_thumb32_insn (to
, inst
.instruction
);
18485 else if (inst
.size
> INSN_SIZE
)
18487 gas_assert (inst
.size
== (2 * INSN_SIZE
));
18488 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
18489 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
18492 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
18495 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
18497 if (inst
.relocs
[r
].type
!= BFD_RELOC_UNUSED
)
18498 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
18499 inst
.size
, & inst
.relocs
[r
].exp
, inst
.relocs
[r
].pc_rel
,
18500 inst
.relocs
[r
].type
);
18503 dwarf2_emit_insn (inst
.size
);
18507 output_it_inst (int cond
, int mask
, char * to
)
18509 unsigned long instruction
= 0xbf00;
18512 instruction
|= mask
;
18513 instruction
|= cond
<< 4;
18517 to
= frag_more (2);
18519 dwarf2_emit_insn (2);
18523 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18562 /* Subroutine of md_assemble, responsible for looking up the primary
18563 opcode from the mnemonic the user wrote. STR points to the
18564 beginning of the mnemonic.
18566 This is not simply a hash table lookup, because of conditional
18567 variants. Most instructions have conditional variants, which are
18568 expressed with a _conditional affix_ to the mnemonic. If we were
18569 to encode each conditional variant as a literal string in the opcode
18570 table, it would have approximately 20,000 entries.
18572 Most mnemonics take this affix as a suffix, and in unified syntax,
18573 'most' is upgraded to 'all'. However, in the divided syntax, some
18574 instructions take the affix as an infix, notably the s-variants of
18575 the arithmetic instructions. Of those instructions, all but six
18576 have the infix appear after the third character of the mnemonic.
18578 Accordingly, the algorithm for looking up primary opcodes given
18581 1. Look up the identifier in the opcode table.
18582 If we find a match, go to step U.
18584 2. Look up the last two characters of the identifier in the
18585 conditions table. If we find a match, look up the first N-2
18586 characters of the identifier in the opcode table. If we
18587 find a match, go to step CE.
18589 3. Look up the fourth and fifth characters of the identifier in
18590 the conditions table. If we find a match, extract those
18591 characters from the identifier, and look up the remaining
18592 characters in the opcode table. If we find a match, go
18597 U. Examine the tag field of the opcode structure, in case this is
18598 one of the six instructions with its conditional infix in an
18599 unusual place. If it is, the tag tells us where to find the
18600 infix; look it up in the conditions table and set inst.cond
18601 accordingly. Otherwise, this is an unconditional instruction.
18602 Again set inst.cond accordingly. Return the opcode structure.
18604 CE. Examine the tag field to make sure this is an instruction that
18605 should receive a conditional suffix. If it is not, fail.
18606 Otherwise, set inst.cond from the suffix we already looked up,
18607 and return the opcode structure.
18609 CM. Examine the tag field to make sure this is an instruction that
18610 should receive a conditional infix after the third character.
18611 If it is not, fail. Otherwise, undo the edits to the current
18612 line of input and proceed as for case CE. */
18614 static const struct asm_opcode
*
18615 opcode_lookup (char **str
)
18619 const struct asm_opcode
*opcode
;
18620 const struct asm_cond
*cond
;
18623 /* Scan up to the end of the mnemonic, which must end in white space,
18624 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
18625 for (base
= end
= *str
; *end
!= '\0'; end
++)
18626 if (*end
== ' ' || *end
== '.')
18632 /* Handle a possible width suffix and/or Neon type suffix. */
18637 /* The .w and .n suffixes are only valid if the unified syntax is in
18639 if (unified_syntax
&& end
[1] == 'w')
18641 else if (unified_syntax
&& end
[1] == 'n')
18646 inst
.vectype
.elems
= 0;
18648 *str
= end
+ offset
;
18650 if (end
[offset
] == '.')
18652 /* See if we have a Neon type suffix (possible in either unified or
18653 non-unified ARM syntax mode). */
18654 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
18657 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
18663 /* Look for unaffixed or special-case affixed mnemonic. */
18664 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18669 if (opcode
->tag
< OT_odd_infix_0
)
18671 inst
.cond
= COND_ALWAYS
;
18675 if (warn_on_deprecated
&& unified_syntax
)
18676 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
18677 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
18678 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18681 inst
.cond
= cond
->value
;
18685 /* Cannot have a conditional suffix on a mnemonic of less than two
18687 if (end
- base
< 3)
18690 /* Look for suffixed mnemonic. */
18692 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18693 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18695 if (opcode
&& cond
)
18698 switch (opcode
->tag
)
18700 case OT_cinfix3_legacy
:
18701 /* Ignore conditional suffixes matched on infix only mnemonics. */
18705 case OT_cinfix3_deprecated
:
18706 case OT_odd_infix_unc
:
18707 if (!unified_syntax
)
18709 /* Fall through. */
18713 case OT_csuf_or_in3
:
18714 inst
.cond
= cond
->value
;
18717 case OT_unconditional
:
18718 case OT_unconditionalF
:
18720 inst
.cond
= cond
->value
;
18723 /* Delayed diagnostic. */
18724 inst
.error
= BAD_COND
;
18725 inst
.cond
= COND_ALWAYS
;
18734 /* Cannot have a usual-position infix on a mnemonic of less than
18735 six characters (five would be a suffix). */
18736 if (end
- base
< 6)
18739 /* Look for infixed mnemonic in the usual position. */
18741 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18745 memcpy (save
, affix
, 2);
18746 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
18747 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18749 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
18750 memcpy (affix
, save
, 2);
18753 && (opcode
->tag
== OT_cinfix3
18754 || opcode
->tag
== OT_cinfix3_deprecated
18755 || opcode
->tag
== OT_csuf_or_in3
18756 || opcode
->tag
== OT_cinfix3_legacy
))
18759 if (warn_on_deprecated
&& unified_syntax
18760 && (opcode
->tag
== OT_cinfix3
18761 || opcode
->tag
== OT_cinfix3_deprecated
))
18762 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
18764 inst
.cond
= cond
->value
;
18771 /* This function generates an initial IT instruction, leaving its block
18772 virtually open for the new instructions. Eventually,
18773 the mask will be updated by now_it_add_mask () each time
18774 a new instruction needs to be included in the IT block.
18775 Finally, the block is closed with close_automatic_it_block ().
18776 The block closure can be requested either from md_assemble (),
18777 a tencode (), or due to a label hook. */
18780 new_automatic_it_block (int cond
)
18782 now_it
.state
= AUTOMATIC_IT_BLOCK
;
18783 now_it
.mask
= 0x18;
18785 now_it
.block_length
= 1;
18786 mapping_state (MAP_THUMB
);
18787 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
18788 now_it
.warn_deprecated
= FALSE
;
18789 now_it
.insn_cond
= TRUE
;
18792 /* Close an automatic IT block.
18793 See comments in new_automatic_it_block (). */
18796 close_automatic_it_block (void)
18798 now_it
.mask
= 0x10;
18799 now_it
.block_length
= 0;
18802 /* Update the mask of the current automatically-generated IT
18803 instruction. See comments in new_automatic_it_block (). */
18806 now_it_add_mask (int cond
)
18808 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18809 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18810 | ((bitvalue) << (nbit)))
18811 const int resulting_bit
= (cond
& 1);
18813 now_it
.mask
&= 0xf;
18814 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18816 (5 - now_it
.block_length
));
18817 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18819 ((5 - now_it
.block_length
) - 1) );
18820 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
18823 #undef SET_BIT_VALUE
/* The IT blocks handling machinery is accessed through these functions:
     it_fsm_pre_encode ()               from md_assemble ()
     set_it_insn_type ()                optional, from the tencode functions
     set_it_insn_type_last ()           ditto
     in_it_block ()                     ditto
     it_fsm_post_encode ()              from md_assemble ()
     force_automatic_it_block_close ()  from label handling functions

   Rationale:
     1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
	initializing the IT insn type with a generic initial value depending
	on the inst.condition.
     2) During the tencode function, two things may happen:
	a) The tencode function overrides the IT insn type by
	   calling either set_it_insn_type (type) or set_it_insn_type_last ().
	b) The tencode function queries the IT block state by
	   calling in_it_block () (i.e. to determine narrow/not narrow mode).

	Both set_it_insn_type and in_it_block run the internal FSM state
	handling function (handle_it_state), because: a) setting the IT insn
	type may incur in an invalid state (exiting the function),
	and b) querying the state requires the FSM to be updated.
	Specifically we want to avoid creating an IT block for conditional
	branches, so it_fsm_pre_encode is actually a guess and we can't
	determine whether an IT block is required until the tencode () routine
	has decided what type of instruction this actually is.
	Because of this, if set_it_insn_type and in_it_block have to be used,
	set_it_insn_type has to be called first.

	set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
	determines the insn IT type depending on the inst.cond code.
	When a tencode () routine encodes an instruction that can be
	either outside an IT block, or, in the case of being inside, has to be
	the last one, set_it_insn_type_last () will determine the proper
	IT instruction type based on the inst.cond code.  Otherwise,
	set_it_insn_type can be called for overriding that logic or
	for covering other cases.

	Calling handle_it_state () may not transition the IT block state to
	OUTSIDE_IT_BLOCK immediately, since the (current) state could be
	still queried.  Instead, if the FSM determines that the state should
	be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
	after the tencode () function: that's what it_fsm_post_encode () does.

	Since in_it_block () calls the state handling function to get an
	updated state, an error may occur (due to invalid insns combination).
	In that case, inst.error is set.
	Therefore, inst.error has to be checked after the execution of
	the tencode () routine.

     3) Back in md_assemble(), it_fsm_post_encode () is called to commit
	any pending state change (if any) that didn't take place in
	handle_it_state () as explained above.  */
18881 it_fsm_pre_encode (void)
18883 if (inst
.cond
!= COND_ALWAYS
)
18884 inst
.it_insn_type
= INSIDE_IT_INSN
;
18886 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
18888 now_it
.state_handled
= 0;
18891 /* IT state FSM handling function. */
18894 handle_it_state (void)
18896 now_it
.state_handled
= 1;
18897 now_it
.insn_cond
= FALSE
;
18899 switch (now_it
.state
)
18901 case OUTSIDE_IT_BLOCK
:
18902 switch (inst
.it_insn_type
)
18904 case OUTSIDE_IT_INSN
:
18907 case INSIDE_IT_INSN
:
18908 case INSIDE_IT_LAST_INSN
:
18909 if (thumb_mode
== 0)
18912 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
18913 as_tsktsk (_("Warning: conditional outside an IT block"\
18918 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
18919 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
18921 /* Automatically generate the IT instruction. */
18922 new_automatic_it_block (inst
.cond
);
18923 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
18924 close_automatic_it_block ();
18928 inst
.error
= BAD_OUT_IT
;
18934 case IF_INSIDE_IT_LAST_INSN
:
18935 case NEUTRAL_IT_INSN
:
18939 now_it
.state
= MANUAL_IT_BLOCK
;
18940 now_it
.block_length
= 0;
18945 case AUTOMATIC_IT_BLOCK
:
18946 /* Three things may happen now:
18947 a) We should increment current it block size;
18948 b) We should close current it block (closing insn or 4 insns);
18949 c) We should close current it block and start a new one (due
18950 to incompatible conditions or
18951 4 insns-length block reached). */
18953 switch (inst
.it_insn_type
)
18955 case OUTSIDE_IT_INSN
:
18956 /* The closure of the block shall happen immediately,
18957 so any in_it_block () call reports the block as closed. */
18958 force_automatic_it_block_close ();
18961 case INSIDE_IT_INSN
:
18962 case INSIDE_IT_LAST_INSN
:
18963 case IF_INSIDE_IT_LAST_INSN
:
18964 now_it
.block_length
++;
18966 if (now_it
.block_length
> 4
18967 || !now_it_compatible (inst
.cond
))
18969 force_automatic_it_block_close ();
18970 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
18971 new_automatic_it_block (inst
.cond
);
18975 now_it
.insn_cond
= TRUE
;
18976 now_it_add_mask (inst
.cond
);
18979 if (now_it
.state
== AUTOMATIC_IT_BLOCK
18980 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
18981 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
18982 close_automatic_it_block ();
18985 case NEUTRAL_IT_INSN
:
18986 now_it
.block_length
++;
18987 now_it
.insn_cond
= TRUE
;
18989 if (now_it
.block_length
> 4)
18990 force_automatic_it_block_close ();
18992 now_it_add_mask (now_it
.cc
& 1);
18996 close_automatic_it_block ();
18997 now_it
.state
= MANUAL_IT_BLOCK
;
19002 case MANUAL_IT_BLOCK
:
19004 /* Check conditional suffixes. */
19005 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
19008 now_it
.mask
&= 0x1f;
19009 is_last
= (now_it
.mask
== 0x10);
19010 now_it
.insn_cond
= TRUE
;
19012 switch (inst
.it_insn_type
)
19014 case OUTSIDE_IT_INSN
:
19015 inst
.error
= BAD_NOT_IT
;
19018 case INSIDE_IT_INSN
:
19019 if (cond
!= inst
.cond
)
19021 inst
.error
= BAD_IT_COND
;
19026 case INSIDE_IT_LAST_INSN
:
19027 case IF_INSIDE_IT_LAST_INSN
:
19028 if (cond
!= inst
.cond
)
19030 inst
.error
= BAD_IT_COND
;
19035 inst
.error
= BAD_BRANCH
;
19040 case NEUTRAL_IT_INSN
:
19041 /* The BKPT instruction is unconditional even in an IT block. */
19045 inst
.error
= BAD_IT_IT
;
/* Pattern/mask pair describing a class of instructions, with a
   human-readable description for diagnostics.  An instruction
   matches when (insn & mask) == pattern.  */
struct depr_insn_mask
{
  unsigned long pattern;
  unsigned long mask;
  const char* description;
};
19062 /* List of 16-bit instruction patterns deprecated in an IT block in
19064 static const struct depr_insn_mask depr_it_insns
[] = {
19065 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
19066 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
19067 { 0xa000, 0xb800, N_("ADR") },
19068 { 0x4800, 0xf800, N_("Literal loads") },
19069 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
19070 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
19071 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
19072 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
19073 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
19078 it_fsm_post_encode (void)
19082 if (!now_it
.state_handled
)
19083 handle_it_state ();
19085 if (now_it
.insn_cond
19086 && !now_it
.warn_deprecated
19087 && warn_on_deprecated
19088 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
)
19089 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
))
19091 if (inst
.instruction
>= 0x10000)
19093 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
19094 "performance deprecated in ARMv8-A and ARMv8-R"));
19095 now_it
.warn_deprecated
= TRUE
;
19099 const struct depr_insn_mask
*p
= depr_it_insns
;
19101 while (p
->mask
!= 0)
19103 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
19105 as_tsktsk (_("IT blocks containing 16-bit Thumb "
19106 "instructions of the following class are "
19107 "performance deprecated in ARMv8-A and "
19108 "ARMv8-R: %s"), p
->description
);
19109 now_it
.warn_deprecated
= TRUE
;
19117 if (now_it
.block_length
> 1)
19119 as_tsktsk (_("IT blocks containing more than one conditional "
19120 "instruction are performance deprecated in ARMv8-A and "
19122 now_it
.warn_deprecated
= TRUE
;
19126 is_last
= (now_it
.mask
== 0x10);
19129 now_it
.state
= OUTSIDE_IT_BLOCK
;
19135 force_automatic_it_block_close (void)
19137 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
19139 close_automatic_it_block ();
19140 now_it
.state
= OUTSIDE_IT_BLOCK
;
19148 if (!now_it
.state_handled
)
19149 handle_it_state ();
19151 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
19154 /* Whether OPCODE only has T32 encoding. Since this function is only used by
19155 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
19156 here, hence the "known" in the function name. */
19159 known_t32_only_insn (const struct asm_opcode
*opcode
)
19161 /* Original Thumb-1 wide instruction. */
19162 if (opcode
->tencode
== do_t_blx
19163 || opcode
->tencode
== do_t_branch23
19164 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
19165 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
19168 /* Wide-only instruction added to ARMv8-M Baseline. */
19169 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
19170 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
19171 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
19172 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
19178 /* Whether wide instruction variant can be used if available for a valid OPCODE
19182 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
19184 if (known_t32_only_insn (opcode
))
19187 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
19188 of variant T3 of B.W is checked in do_t_branch. */
19189 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
19190 && opcode
->tencode
== do_t_branch
)
19193 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
19194 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
19195 && opcode
->tencode
== do_t_mov_cmp
19196 /* Make sure CMP instruction is not affected. */
19197 && opcode
->aencode
== do_mov
)
19200 /* Wide instruction variants of all instructions with narrow *and* wide
19201 variants become available with ARMv6t2. Other opcodes are either
19202 narrow-only or wide-only and are thus available if OPCODE is valid. */
19203 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
19206 /* OPCODE with narrow only instruction variant or wide variant not
19212 md_assemble (char *str
)
19215 const struct asm_opcode
* opcode
;
19217 /* Align the previous label if needed. */
19218 if (last_label_seen
!= NULL
)
19220 symbol_set_frag (last_label_seen
, frag_now
);
19221 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
19222 S_SET_SEGMENT (last_label_seen
, now_seg
);
19225 memset (&inst
, '\0', sizeof (inst
));
19227 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
19228 inst
.relocs
[r
].type
= BFD_RELOC_UNUSED
;
19230 opcode
= opcode_lookup (&p
);
19233 /* It wasn't an instruction, but it might be a register alias of
19234 the form alias .req reg, or a Neon .dn/.qn directive. */
19235 if (! create_register_alias (str
, p
)
19236 && ! create_neon_reg_alias (str
, p
))
19237 as_bad (_("bad instruction `%s'"), str
);
19242 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
19243 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
19245 /* The value which unconditional instructions should have in place of the
19246 condition field. */
19247 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
19251 arm_feature_set variant
;
19253 variant
= cpu_variant
;
19254 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
19255 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
19256 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
19257 /* Check that this instruction is supported for this CPU. */
19258 if (!opcode
->tvariant
19259 || (thumb_mode
== 1
19260 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
19262 if (opcode
->tencode
== do_t_swi
)
19263 as_bad (_("SVC is not permitted on this architecture"));
19265 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
19268 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
19269 && opcode
->tencode
!= do_t_branch
)
19271 as_bad (_("Thumb does not support conditional execution"));
19275 /* Two things are addressed here:
19276 1) Implicit require narrow instructions on Thumb-1.
19277 This avoids relaxation accidentally introducing Thumb-2
19279 2) Reject wide instructions in non Thumb-2 cores.
19281 Only instructions with narrow and wide variants need to be handled
19282 but selecting all non wide-only instructions is easier. */
19283 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
19284 && !t32_insn_ok (variant
, opcode
))
19286 if (inst
.size_req
== 0)
19288 else if (inst
.size_req
== 4)
19290 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
19291 as_bad (_("selected processor does not support 32bit wide "
19292 "variant of instruction `%s'"), str
);
19294 as_bad (_("selected processor does not support `%s' in "
19295 "Thumb-2 mode"), str
);
19300 inst
.instruction
= opcode
->tvalue
;
19302 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
19304 /* Prepare the it_insn_type for those encodings that don't set
19306 it_fsm_pre_encode ();
19308 opcode
->tencode ();
19310 it_fsm_post_encode ();
19313 if (!(inst
.error
|| inst
.relax
))
19315 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
19316 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
19317 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
19319 as_bad (_("cannot honor width suffix -- `%s'"), str
);
19324 /* Something has gone badly wrong if we try to relax a fixed size
19326 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
19328 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
19329 *opcode
->tvariant
);
19330 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
19331 set those bits when Thumb-2 32-bit instructions are seen. The impact
19332 of relaxable instructions will be considered later after we finish all
19334 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
19335 variant
= arm_arch_none
;
19337 variant
= cpu_variant
;
19338 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
19339 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
19342 check_neon_suffixes
;
19346 mapping_state (MAP_THUMB
);
19349 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
19353 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
19354 is_bx
= (opcode
->aencode
== do_bx
);
19356 /* Check that this instruction is supported for this CPU. */
19357 if (!(is_bx
&& fix_v4bx
)
19358 && !(opcode
->avariant
&&
19359 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
19361 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
19366 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
19370 inst
.instruction
= opcode
->avalue
;
19371 if (opcode
->tag
== OT_unconditionalF
)
19372 inst
.instruction
|= 0xFU
<< 28;
19374 inst
.instruction
|= inst
.cond
<< 28;
19375 inst
.size
= INSN_SIZE
;
19376 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
19378 it_fsm_pre_encode ();
19379 opcode
->aencode ();
19380 it_fsm_post_encode ();
19382 /* Arm mode bx is marked as both v4T and v5 because it's still required
19383 on a hypothetical non-thumb v5 core. */
19385 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
19387 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
19388 *opcode
->avariant
);
19390 check_neon_suffixes
;
19394 mapping_state (MAP_ARM
);
19399 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
19407 check_it_blocks_finished (void)
19412 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
19413 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
19414 == MANUAL_IT_BLOCK
)
19416 as_warn (_("section '%s' finished with an open IT block."),
19420 if (now_it
.state
== MANUAL_IT_BLOCK
)
19421 as_warn (_("file finished with an open IT block."));
19425 /* Various frobbings of labels and their addresses. */
19428 arm_start_line_hook (void)
19430 last_label_seen
= NULL
;
19434 arm_frob_label (symbolS
* sym
)
19436 last_label_seen
= sym
;
19438 ARM_SET_THUMB (sym
, thumb_mode
);
19440 #if defined OBJ_COFF || defined OBJ_ELF
19441 ARM_SET_INTERWORK (sym
, support_interwork
);
19444 force_automatic_it_block_close ();
19446 /* Note - do not allow local symbols (.Lxxx) to be labelled
19447 as Thumb functions. This is because these labels, whilst
19448 they exist inside Thumb code, are not the entry points for
19449 possible ARM->Thumb calls. Also, these labels can be used
19450 as part of a computed goto or switch statement. eg gcc
19451 can generate code that looks like this:
19453 ldr r2, [pc, .Laaa]
19463 The first instruction loads the address of the jump table.
19464 The second instruction converts a table index into a byte offset.
19465 The third instruction gets the jump address out of the table.
19466 The fourth instruction performs the jump.
19468 If the address stored at .Laaa is that of a symbol which has the
19469 Thumb_Func bit set, then the linker will arrange for this address
19470 to have the bottom bit set, which in turn would mean that the
19471 address computation performed by the third instruction would end
19472 up with the bottom bit set. Since the ARM is capable of unaligned
19473 word loads, the instruction would then load the incorrect address
19474 out of the jump table, and chaos would ensue. */
19475 if (label_is_thumb_function_name
19476 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
19477 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
19479 /* When the address of a Thumb function is taken the bottom
19480 bit of that address should be set. This will allow
19481 interworking between Arm and Thumb functions to work
19484 THUMB_SET_FUNC (sym
, 1);
19486 label_is_thumb_function_name
= FALSE
;
19489 dwarf2_emit_label (sym
);
19493 arm_data_in_code (void)
19495 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
19497 *input_line_pointer
= '/';
19498 input_line_pointer
+= 5;
19499 *input_line_pointer
= 0;
19507 arm_canonicalize_symbol_name (char * name
)
19511 if (thumb_mode
&& (len
= strlen (name
)) > 5
19512 && streq (name
+ len
- 5, "/data"))
19513 *(name
+ len
- 5) = 0;
19518 /* Table of all register names defined by default. The user can
19519 define additional names with .req. Note that all register names
19520 should appear in both upper and lowercase variants. Some registers
19521 also have mixed-case names. */
19523 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
19524 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
19525 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
19526 #define REGSET(p,t) \
19527 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
19528 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
19529 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
19530 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
19531 #define REGSETH(p,t) \
19532 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
19533 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
19534 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
19535 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
19536 #define REGSET2(p,t) \
19537 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
19538 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
19539 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
19540 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
19541 #define SPLRBANK(base,bank,t) \
19542 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
19543 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
19544 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
19545 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
19546 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
19547 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
19549 static const struct reg_entry reg_names
[] =
19551 /* ARM integer registers. */
19552 REGSET(r
, RN
), REGSET(R
, RN
),
19554 /* ATPCS synonyms. */
19555 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
19556 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
19557 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
19559 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
19560 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
19561 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
19563 /* Well-known aliases. */
19564 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
19565 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
19567 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
19568 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
19570 /* Coprocessor numbers. */
19571 REGSET(p
, CP
), REGSET(P
, CP
),
19573 /* Coprocessor register numbers. The "cr" variants are for backward
19575 REGSET(c
, CN
), REGSET(C
, CN
),
19576 REGSET(cr
, CN
), REGSET(CR
, CN
),
19578 /* ARM banked registers. */
19579 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
19580 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
19581 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
19582 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
19583 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
19584 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
19585 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
19587 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
19588 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
19589 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
19590 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
19591 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
19592 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
19593 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
19594 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
19596 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
19597 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
19598 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
19599 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
19600 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
19601 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
19602 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
19603 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
19604 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
19606 /* FPA registers. */
19607 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
19608 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
19610 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
19611 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
19613 /* VFP SP registers. */
19614 REGSET(s
,VFS
), REGSET(S
,VFS
),
19615 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
19617 /* VFP DP Registers. */
19618 REGSET(d
,VFD
), REGSET(D
,VFD
),
19619 /* Extra Neon DP registers. */
19620 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
19622 /* Neon QP registers. */
19623 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
19625 /* VFP control registers. */
19626 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
19627 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
19628 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
19629 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
19630 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
19631 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
19632 REGDEF(mvfr2
,5,VFC
), REGDEF(MVFR2
,5,VFC
),
19634 /* Maverick DSP coprocessor registers. */
19635 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
19636 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
19638 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
19639 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
19640 REGDEF(dspsc
,0,DSPSC
),
19642 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
19643 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
19644 REGDEF(DSPSC
,0,DSPSC
),
19646 /* iWMMXt data registers - p0, c0-15. */
19647 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
19649 /* iWMMXt control registers - p1, c0-3. */
19650 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
19651 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
19652 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
19653 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
19655 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
19656 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
19657 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
19658 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
19659 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
19661 /* XScale accumulator registers. */
19662 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
19668 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
19669 within psr_required_here. */
19670 static const struct asm_psr psrs
[] =
19672 /* Backward compatibility notation. Note that "all" is no longer
19673 truly all possible PSR bits. */
19674 {"all", PSR_c
| PSR_f
},
19678 /* Individual flags. */
19684 /* Combinations of flags. */
19685 {"fs", PSR_f
| PSR_s
},
19686 {"fx", PSR_f
| PSR_x
},
19687 {"fc", PSR_f
| PSR_c
},
19688 {"sf", PSR_s
| PSR_f
},
19689 {"sx", PSR_s
| PSR_x
},
19690 {"sc", PSR_s
| PSR_c
},
19691 {"xf", PSR_x
| PSR_f
},
19692 {"xs", PSR_x
| PSR_s
},
19693 {"xc", PSR_x
| PSR_c
},
19694 {"cf", PSR_c
| PSR_f
},
19695 {"cs", PSR_c
| PSR_s
},
19696 {"cx", PSR_c
| PSR_x
},
19697 {"fsx", PSR_f
| PSR_s
| PSR_x
},
19698 {"fsc", PSR_f
| PSR_s
| PSR_c
},
19699 {"fxs", PSR_f
| PSR_x
| PSR_s
},
19700 {"fxc", PSR_f
| PSR_x
| PSR_c
},
19701 {"fcs", PSR_f
| PSR_c
| PSR_s
},
19702 {"fcx", PSR_f
| PSR_c
| PSR_x
},
19703 {"sfx", PSR_s
| PSR_f
| PSR_x
},
19704 {"sfc", PSR_s
| PSR_f
| PSR_c
},
19705 {"sxf", PSR_s
| PSR_x
| PSR_f
},
19706 {"sxc", PSR_s
| PSR_x
| PSR_c
},
19707 {"scf", PSR_s
| PSR_c
| PSR_f
},
19708 {"scx", PSR_s
| PSR_c
| PSR_x
},
19709 {"xfs", PSR_x
| PSR_f
| PSR_s
},
19710 {"xfc", PSR_x
| PSR_f
| PSR_c
},
19711 {"xsf", PSR_x
| PSR_s
| PSR_f
},
19712 {"xsc", PSR_x
| PSR_s
| PSR_c
},
19713 {"xcf", PSR_x
| PSR_c
| PSR_f
},
19714 {"xcs", PSR_x
| PSR_c
| PSR_s
},
19715 {"cfs", PSR_c
| PSR_f
| PSR_s
},
19716 {"cfx", PSR_c
| PSR_f
| PSR_x
},
19717 {"csf", PSR_c
| PSR_s
| PSR_f
},
19718 {"csx", PSR_c
| PSR_s
| PSR_x
},
19719 {"cxf", PSR_c
| PSR_x
| PSR_f
},
19720 {"cxs", PSR_c
| PSR_x
| PSR_s
},
19721 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
19722 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
19723 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
19724 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
19725 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
19726 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
19727 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
19728 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
19729 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
19730 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
19731 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
19732 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
19733 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
19734 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
19735 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
19736 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
19737 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
19738 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
19739 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
19740 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
19741 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
19742 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
19743 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
19744 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
19747 /* Table of V7M psr names. */
19748 static const struct asm_psr v7m_psrs
[] =
19750 {"apsr", 0x0 }, {"APSR", 0x0 },
19751 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
19752 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
19753 {"psr", 0x3 }, {"PSR", 0x3 },
19754 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
19755 {"ipsr", 0x5 }, {"IPSR", 0x5 },
19756 {"epsr", 0x6 }, {"EPSR", 0x6 },
19757 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
19758 {"msp", 0x8 }, {"MSP", 0x8 },
19759 {"psp", 0x9 }, {"PSP", 0x9 },
19760 {"msplim", 0xa }, {"MSPLIM", 0xa },
19761 {"psplim", 0xb }, {"PSPLIM", 0xb },
19762 {"primask", 0x10}, {"PRIMASK", 0x10},
19763 {"basepri", 0x11}, {"BASEPRI", 0x11},
19764 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
19765 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
19766 {"control", 0x14}, {"CONTROL", 0x14},
19767 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
19768 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
19769 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
19770 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
19771 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
19772 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
19773 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
19774 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
19775 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
19778 /* Table of all shift-in-operand names. */
19779 static const struct asm_shift_name shift_names
[] =
19781 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
19782 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
19783 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
19784 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
19785 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
19786 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
19789 /* Table of all explicit relocation names. */
19791 static struct reloc_entry reloc_names
[] =
19793 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
19794 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
19795 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
19796 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
19797 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
19798 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
19799 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
19800 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
19801 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
19802 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
19803 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
19804 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
19805 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
19806 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
19807 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
19808 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
19809 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
19810 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
},
19811 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC
},
19812 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC
},
19813 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
19814 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
19815 { "funcdesc", BFD_RELOC_ARM_FUNCDESC
},
19816 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC
},
19817 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC
}, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC
},
19818 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC
}, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC
},
19819 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC
}, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC
},
19823 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
19824 static const struct asm_cond conds
[] =
19828 {"cs", 0x2}, {"hs", 0x2},
19829 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
19843 #define UL_BARRIER(L,U,CODE,FEAT) \
19844 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
19845 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
19847 static struct asm_barrier_opt barrier_opt_names
[] =
19849 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
19850 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
19851 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
19852 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
19853 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
19854 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
19855 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
19856 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
19857 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
19858 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
19859 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
19860 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
19861 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
19862 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
19863 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
19864 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. OPS_2.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  NOTE(review): the continuation lines
   of ToC/ToU/toC/toU and two comment terminators were lost in a
   corrupted merge; restored.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Build one table row for mnemonic m1 with condition m2 infixed (m3 is
   the tail of the mnemonic).  An empty m2 yields the unconditional row.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand one row per condition code for a mnemonic with an odd-position
   condition infix.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   variants.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
20065 static const struct asm_opcode insns
[] =
20067 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
20068 #define THUMB_VARIANT & arm_ext_v4t
20069 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20070 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20071 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20072 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20073 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20074 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20075 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20076 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20077 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20078 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20079 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20080 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20081 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20082 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20083 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20084 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20086 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
20087 for setting PSR flag bits. They are obsolete in V6 and do not
20088 have Thumb equivalents. */
20089 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20090 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20091 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
20092 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20093 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20094 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
20095 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20096 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20097 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
20099 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
20100 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
20101 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20102 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20104 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
20105 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20106 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
20108 OP_ADDRGLDR
),ldst
, t_ldst
),
20109 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20111 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20112 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20113 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20114 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20115 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20116 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20118 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
20119 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
20122 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
20123 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
20124 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
20125 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
20127 /* Thumb-compatibility pseudo ops. */
20128 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20129 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20130 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20131 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20132 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20133 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20134 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20135 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20136 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
20137 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
20138 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
20139 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
20141 /* These may simplify to neg. */
20142 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20143 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20145 #undef THUMB_VARIANT
20146 #define THUMB_VARIANT & arm_ext_os
20148 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20149 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20151 #undef THUMB_VARIANT
20152 #define THUMB_VARIANT & arm_ext_v6
20154 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
20156 /* V1 instructions with no Thumb analogue prior to V6T2. */
20157 #undef THUMB_VARIANT
20158 #define THUMB_VARIANT & arm_ext_v6t2
20160 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20161 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20162 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
20164 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20165 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20166 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
20167 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20169 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20170 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20172 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20173 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20175 /* V1 instructions with no Thumb analogue at all. */
20176 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
20177 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
20179 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20180 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20181 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20182 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20183 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20184 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20185 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20186 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20189 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
20190 #undef THUMB_VARIANT
20191 #define THUMB_VARIANT & arm_ext_v4t
20193 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20194 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20196 #undef THUMB_VARIANT
20197 #define THUMB_VARIANT & arm_ext_v6t2
20199 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
20200 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
20202 /* Generic coprocessor instructions. */
20203 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
20204 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20205 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20206 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20207 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20208 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20209 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20212 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
20214 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20215 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20218 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
20219 #undef THUMB_VARIANT
20220 #define THUMB_VARIANT & arm_ext_msr
20222 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
20223 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
20226 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
20227 #undef THUMB_VARIANT
20228 #define THUMB_VARIANT & arm_ext_v6t2
20230 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20231 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20232 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20233 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20234 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20235 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20236 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20237 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20240 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
20241 #undef THUMB_VARIANT
20242 #define THUMB_VARIANT & arm_ext_v4t
20244 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20245 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20246 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20247 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20248 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20249 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20252 #define ARM_VARIANT & arm_ext_v4t_5
20254 /* ARM Architecture 4T. */
20255 /* Note: bx (and blx) are required on V5, even if the processor does
20256 not support Thumb. */
20257 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
20260 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
20261 #undef THUMB_VARIANT
20262 #define THUMB_VARIANT & arm_ext_v5t
20264 /* Note: blx has 2 variants; the .value coded here is for
20265 BLX(2). Only this variant has conditional execution. */
20266 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
20267 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
20269 #undef THUMB_VARIANT
20270 #define THUMB_VARIANT & arm_ext_v6t2
20272 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
20273 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20274 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20275 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20276 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20277 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
20278 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20279 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20282 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
20283 #undef THUMB_VARIANT
20284 #define THUMB_VARIANT & arm_ext_v5exp
20286 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20287 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20288 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20289 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20291 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20292 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20294 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20295 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20296 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20297 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20299 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20300 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20301 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20302 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20304 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20305 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20307 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20308 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20309 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20310 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20313 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
20314 #undef THUMB_VARIANT
20315 #define THUMB_VARIANT & arm_ext_v6t2
20317 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
20318 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
20320 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
20321 ADDRGLDRS
), ldrd
, t_ldstd
),
20323 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20324 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20327 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
20329 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
20332 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
20333 #undef THUMB_VARIANT
20334 #define THUMB_VARIANT & arm_ext_v6
20336 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
20337 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
20338 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20339 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20340 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20341 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20342 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20343 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20344 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20345 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
20347 #undef THUMB_VARIANT
20348 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20350 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
20351 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20353 #undef THUMB_VARIANT
20354 #define THUMB_VARIANT & arm_ext_v6t2
20356 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20357 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20359 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
20360 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
20362 /* ARM V6 not included in V7M. */
20363 #undef THUMB_VARIANT
20364 #define THUMB_VARIANT & arm_ext_v6_notm
20365 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20366 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20367 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
20368 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
20369 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
20370 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20371 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
20372 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
20373 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
20374 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20375 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20376 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20377 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
20378 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
20379 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
20380 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
20381 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
20382 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
20383 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
20385 /* ARM V6 not included in V7M (eg. integer SIMD). */
20386 #undef THUMB_VARIANT
20387 #define THUMB_VARIANT & arm_ext_v6_dsp
20388 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
20389 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
20390 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20391 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20392 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20393 /* Old name for QASX. */
20394 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20395 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20396 /* Old name for QSAX. */
20397 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20398 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20399 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20400 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20401 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20402 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20403 /* Old name for SASX. */
20404 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20405 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20406 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20407 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20408 /* Old name for SHASX. */
20409 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20410 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20411 /* Old name for SHSAX. */
20412 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20413 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20414 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20415 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20416 /* Old name for SSAX. */
20417 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20418 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20419 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20420 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20421 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20422 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20423 /* Old name for UASX. */
20424 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20425 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20426 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20427 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20428 /* Old name for UHASX. */
20429 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20430 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20431 /* Old name for UHSAX. */
20432 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20433 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20434 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20435 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20436 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20437 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20438 /* Old name for UQASX. */
20439 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20440 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20441 /* Old name for UQSAX. */
20442 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20443 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20444 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20445 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20446 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20447 /* Old name for USAX. */
20448 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20449 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20450 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20451 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20452 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20453 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20454 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20455 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20456 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20457 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20458 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20459 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20460 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20461 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20462 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20463 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20464 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20465 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20466 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20467 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20468 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20469 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20470 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20471 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20472 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20473 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20474 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20475 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20476 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20477 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
20478 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
20479 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20480 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20481 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
20484 #define ARM_VARIANT & arm_ext_v6k_v6t2
20485 #undef THUMB_VARIANT
20486 #define THUMB_VARIANT & arm_ext_v6k_v6t2
20488 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
20489 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
20490 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
20491 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
20493 #undef THUMB_VARIANT
20494 #define THUMB_VARIANT & arm_ext_v6_notm
20495 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
20497 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
20498 RRnpcb
), strexd
, t_strexd
),
20500 #undef THUMB_VARIANT
20501 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20502 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
20504 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
20506 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20508 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20510 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
20513 #define ARM_VARIANT & arm_ext_sec
20514 #undef THUMB_VARIANT
20515 #define THUMB_VARIANT & arm_ext_sec
20517 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
20520 #define ARM_VARIANT & arm_ext_virt
20521 #undef THUMB_VARIANT
20522 #define THUMB_VARIANT & arm_ext_virt
20524 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
20525 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
20528 #define ARM_VARIANT & arm_ext_pan
20529 #undef THUMB_VARIANT
20530 #define THUMB_VARIANT & arm_ext_pan
20532 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
20535 #define ARM_VARIANT & arm_ext_v6t2
20536 #undef THUMB_VARIANT
20537 #define THUMB_VARIANT & arm_ext_v6t2
20539 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
20540 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
20541 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
20542 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
20544 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
20545 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
20547 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20548 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20549 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20550 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20553 #define ARM_VARIANT & arm_ext_v3
20554 #undef THUMB_VARIANT
20555 #define THUMB_VARIANT & arm_ext_v6t2
20557 TUE("csdb", 320f014
, f3af8014
, 0, (), noargs
, t_csdb
),
20558 TUF("ssbb", 57ff040
, f3bf8f40
, 0, (), noargs
, t_csdb
),
20559 TUF("pssbb", 57ff044
, f3bf8f44
, 0, (), noargs
, t_csdb
),
20562 #define ARM_VARIANT & arm_ext_v6t2
20563 #undef THUMB_VARIANT
20564 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20565 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
20566 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
20568 /* Thumb-only instructions. */
20570 #define ARM_VARIANT NULL
20571 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
20572 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
20574 /* ARM does not really have an IT instruction, so always allow it.
20575 The opcode is copied from Thumb in order to allow warnings in
20576 -mimplicit-it=[never | arm] modes. */
20578 #define ARM_VARIANT & arm_ext_v1
20579 #undef THUMB_VARIANT
20580 #define THUMB_VARIANT & arm_ext_v6t2
20582 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
20583 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
20584 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
20585 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
20586 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
20587 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
20588 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
20589 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
20590 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
20591 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
20592 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
20593 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
20594 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
20595 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
20596 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
20597 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20598 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
20599 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
20601 /* Thumb2 only instructions. */
20603 #define ARM_VARIANT NULL
20605 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
20606 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
20607 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
20608 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
20609 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
20610 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
20612 /* Hardware division instructions. */
20614 #define ARM_VARIANT & arm_ext_adiv
20615 #undef THUMB_VARIANT
20616 #define THUMB_VARIANT & arm_ext_div
20618 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
20619 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
20621 /* ARM V6M/V7 instructions. */
20623 #define ARM_VARIANT & arm_ext_barrier
20624 #undef THUMB_VARIANT
20625 #define THUMB_VARIANT & arm_ext_barrier
20627 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
20628 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
20629 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
20631 /* ARM V7 instructions. */
20633 #define ARM_VARIANT & arm_ext_v7
20634 #undef THUMB_VARIANT
20635 #define THUMB_VARIANT & arm_ext_v7
20637 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
20638 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
20641 #define ARM_VARIANT & arm_ext_mp
20642 #undef THUMB_VARIANT
20643 #define THUMB_VARIANT & arm_ext_mp
20645 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
20647 /* AArchv8 instructions. */
20649 #define ARM_VARIANT & arm_ext_v8
20651 /* Instructions shared between armv8-a and armv8-m. */
20652 #undef THUMB_VARIANT
20653 #define THUMB_VARIANT & arm_ext_atomics
20655 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20656 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20657 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20658 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20659 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20660 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20661 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20662 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
20663 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20664 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20666 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20668 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20670 #undef THUMB_VARIANT
20671 #define THUMB_VARIANT & arm_ext_v8
20673 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
20674 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
20676 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
20679 /* Defined in V8 but is in undefined encoding space for earlier
20680 architectures. However earlier architectures are required to treat
20681 this instuction as a semihosting trap as well. Hence while not explicitly
20682 defined as such, it is in fact correct to define the instruction for all
20684 #undef THUMB_VARIANT
20685 #define THUMB_VARIANT & arm_ext_v1
20687 #define ARM_VARIANT & arm_ext_v1
20688 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
20690 /* ARMv8 T32 only. */
20692 #define ARM_VARIANT NULL
20693 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
20694 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
20695 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
20697 /* FP for ARMv8. */
20699 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20700 #undef THUMB_VARIANT
20701 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20703 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20704 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20705 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20706 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20707 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
20708 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
20709 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
20710 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
20711 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
20712 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
20713 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
20714 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
20715 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
20716 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
20717 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
20718 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
20719 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
20721 /* Crypto v1 extensions. */
20723 #define ARM_VARIANT & fpu_crypto_ext_armv8
20724 #undef THUMB_VARIANT
20725 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20727 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
20728 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
20729 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
20730 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
20731 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
20732 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
20733 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
20734 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
20735 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
20736 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
20737 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
20738 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
20739 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
20740 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
20743 #define ARM_VARIANT & crc_ext_armv8
20744 #undef THUMB_VARIANT
20745 #define THUMB_VARIANT & crc_ext_armv8
20746 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
20747 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
20748 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
20749 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
20750 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
20751 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
20753 /* ARMv8.2 RAS extension. */
20755 #define ARM_VARIANT & arm_ext_ras
20756 #undef THUMB_VARIANT
20757 #define THUMB_VARIANT & arm_ext_ras
20758 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
20761 #define ARM_VARIANT & arm_ext_v8_3
20762 #undef THUMB_VARIANT
20763 #define THUMB_VARIANT & arm_ext_v8_3
20764 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
20765 NUF (vcmla
, 0, 4, (RNDQ
, RNDQ
, RNDQ_RNSC
, EXPi
), vcmla
),
20766 NUF (vcadd
, 0, 4, (RNDQ
, RNDQ
, RNDQ
, EXPi
), vcadd
),
20769 #define ARM_VARIANT & fpu_neon_ext_dotprod
20770 #undef THUMB_VARIANT
20771 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20772 NUF (vsdot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_s
),
20773 NUF (vudot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_u
),
20776 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20777 #undef THUMB_VARIANT
20778 #define THUMB_VARIANT NULL
20780 cCE("wfs", e200110
, 1, (RR
), rd
),
20781 cCE("rfs", e300110
, 1, (RR
), rd
),
20782 cCE("wfc", e400110
, 1, (RR
), rd
),
20783 cCE("rfc", e500110
, 1, (RR
), rd
),
20785 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20786 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20787 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20788 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20790 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20791 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20792 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20793 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20795 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
20796 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
20797 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
20798 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
20799 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
20800 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
20801 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
20802 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
20803 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
20804 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
20805 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
20806 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
20808 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
20809 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
20810 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
20811 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
20812 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
20813 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
20814 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
20815 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
20816 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
20817 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
20818 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
20819 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
20821 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
20822 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
20823 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
20824 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
20825 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
20826 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
20827 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
20828 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
20829 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
20830 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
20831 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
20832 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
20834 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
20835 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
20836 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
20837 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
20838 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
20839 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
20840 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
20841 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
20842 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
20843 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
20844 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
20845 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
20847 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
20848 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
20849 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
20850 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
20851 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
20852 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
20853 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
20854 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
20855 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
20856 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
20857 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
20858 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
20860 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
20861 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
20862 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
20863 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
20864 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
20865 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
20866 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
20867 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
20868 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
20869 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
20870 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
20871 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
20873 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
20874 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
20875 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
20876 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
20877 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
20878 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
20879 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
20880 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
20881 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
20882 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
20883 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
20884 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
20886 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
20887 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
20888 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
20889 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
20890 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
20891 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
20892 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
20893 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
20894 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
20895 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
20896 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
20897 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
20899 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
20900 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
20901 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
20902 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
20903 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
20904 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
20905 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
20906 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
20907 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
20908 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
20909 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
20910 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
20912 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
20913 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
20914 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
20915 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
20916 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
20917 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
20918 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
20919 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
20920 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
20921 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
20922 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
20923 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
20925 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
20926 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
20927 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
20928 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
20929 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
20930 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
20931 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
20932 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
20933 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
20934 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
20935 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
20936 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
20938 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
20939 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
20940 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
20941 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
20942 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
20943 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
20944 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
20945 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
20946 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
20947 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
20948 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
20949 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
20951 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
20952 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
20953 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
20954 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
20955 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
20956 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
20957 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
20958 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
20959 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
20960 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
20961 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
20962 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
20964 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
20965 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
20966 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
20967 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
20968 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
20969 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
20970 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
20971 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
20972 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
20973 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
20974 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
20975 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
20977 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
20978 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
20979 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
20980 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
20981 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
20982 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
20983 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
20984 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
20985 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
20986 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
20987 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
20988 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
20990 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
20991 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
20992 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
20993 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
20994 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
20995 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
20996 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
20997 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
20998 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
20999 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
21000 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
21001 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
21003 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21004 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21005 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21006 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21007 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21008 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21009 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21010 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21011 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21012 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21013 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21014 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21016 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21017 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21018 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21019 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21020 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21021 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21022 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21023 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21024 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21025 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21026 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21027 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21029 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21030 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21031 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21032 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21033 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21034 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21035 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21036 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21037 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21038 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21039 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21040 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21042 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21043 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21044 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21045 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21046 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21047 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21048 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21049 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21050 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21051 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21052 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21053 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21055 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21056 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21057 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21058 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21059 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21060 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21061 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21062 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21063 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21064 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21065 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21066 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21068 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21069 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21070 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21071 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21072 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21073 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21074 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21075 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21076 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21077 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21078 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21079 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21081 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21082 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21083 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21084 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21085 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21086 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21087 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21088 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21089 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21090 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21091 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21092 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21094 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21095 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21096 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21097 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21098 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21099 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21100 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21101 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21102 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21103 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21104 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21105 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21107 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21108 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21109 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21110 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21111 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21112 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21113 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21114 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21115 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21116 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21117 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21118 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21120 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21121 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21122 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21123 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21124 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21125 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21126 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21127 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21128 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21129 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21130 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21131 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21133 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21134 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21135 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21136 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21137 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21138 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21139 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21140 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21141 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21142 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21143 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21144 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21146 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21147 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21148 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21149 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21150 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21151 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21152 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21153 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21154 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21155 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21156 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21157 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21159 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21160 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21161 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21162 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21163 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21164 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21165 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21166 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21167 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21168 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21169 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21170 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21172 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21173 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21174 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21175 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21177 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
21178 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
21179 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
21180 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
21181 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
21182 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
21183 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
21184 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
21185 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
21186 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
21187 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
21188 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
21190 /* The implementation of the FIX instruction is broken on some
21191 assemblers, in that it accepts a precision specifier as well as a
21192 rounding specifier, despite the fact that this is meaningless.
21193 To be more compatible, we accept it as well, though of course it
21194 does not set any bits. */
21195 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
21196 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
21197 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
21198 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
21199 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
21200 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
21201 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
21202 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
21203 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
21204 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
21205 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
21206 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
21207 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
21209 /* Instructions that were new with the real FPA, call them V2. */
21211 #define ARM_VARIANT & fpu_fpa_ext_v2
21213 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21214 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21215 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21216 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21217 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21218 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21221 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
21223 /* Moves and type conversions. */
21224 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21225 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
21226 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
21227 cCE("fmstat", ef1fa10
, 0, (), noargs
),
21228 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
21229 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
21230 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21231 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21232 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21233 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21234 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21235 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21236 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
21237 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
21239 /* Memory operations. */
21240 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21241 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21242 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21243 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21244 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21245 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21246 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21247 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21248 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21249 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21250 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21251 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21252 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21253 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21254 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21255 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21256 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21257 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21259 /* Monadic operations. */
21260 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21261 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21262 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21264 /* Dyadic operations. */
21265 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21266 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21267 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21268 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21269 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21270 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21271 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21272 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21273 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21276 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21277 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
21278 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21279 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
21281 /* Double precision load/store are still present on single precision
21282 implementations. */
21283 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
21284 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
21285 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21286 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21287 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21288 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21289 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21290 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21291 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21292 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21295 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
21297 /* Moves and type conversions. */
21298 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21299 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21300 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21301 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
21302 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
21303 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
21304 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
21305 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21306 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21307 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21308 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21309 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21310 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21312 /* Monadic operations. */
21313 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21314 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21315 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21317 /* Dyadic operations. */
21318 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21319 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21320 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21321 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21322 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21323 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21324 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21325 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21326 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21329 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21330 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
21331 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21332 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
21335 #define ARM_VARIANT & fpu_vfp_ext_v2
21337 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
21338 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
21339 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
21340 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
21342 /* Instructions which may belong to either the Neon or VFP instruction sets.
21343 Individual encoder functions perform additional architecture checks. */
21345 #define ARM_VARIANT & fpu_vfp_ext_v1xd
21346 #undef THUMB_VARIANT
21347 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
21349 /* These mnemonics are unique to VFP. */
21350 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
21351 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
21352 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21353 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21354 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21355 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
21356 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
21357 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
21358 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
21359 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
21361 /* Mnemonics shared by Neon and VFP. */
21362 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
21363 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
21364 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
21366 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
21367 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
21369 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
21370 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
21372 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21373 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21374 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21375 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21376 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21377 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21379 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
21380 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
21381 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
21382 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
21385 /* NOTE: All VMOV encoding is special-cased! */
21386 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
21387 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
21389 #undef THUMB_VARIANT
21390 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
21391 by different feature bits. Since we are setting the Thumb guard, we can
21392 require Thumb-1 which makes it a nop guard and set the right feature bit in
21393 do_vldr_vstr (). */
21394 #define THUMB_VARIANT & arm_ext_v4t
21395 NCE(vldr
, d100b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
21396 NCE(vstr
, d000b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
21399 #define ARM_VARIANT & arm_ext_fp16
21400 #undef THUMB_VARIANT
21401 #define THUMB_VARIANT & arm_ext_fp16
21402 /* New instructions added from v8.2, allowing the extraction and insertion of
21403 the upper 16 bits of a 32-bit vector register. */
21404 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
21405 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
21407 /* New backported fma/fms instructions optional in v8.2. */
21408 NCE (vfmal
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmal
),
21409 NCE (vfmsl
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmsl
),
21411 #undef THUMB_VARIANT
21412 #define THUMB_VARIANT & fpu_neon_ext_v1
21414 #define ARM_VARIANT & fpu_neon_ext_v1
21416 /* Data processing with three registers of the same length. */
21417 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
21418 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
21419 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
21420 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21421 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21422 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21423 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21424 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21425 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21426 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
21427 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
21428 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
21429 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
21430 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
21431 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
21432 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
21433 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
21434 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
21435 /* If not immediate, fall back to neon_dyadic_i64_su.
21436 shl_imm should accept I8 I16 I32 I64,
21437 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
21438 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
21439 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
21440 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
21441 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
21442 /* Logic ops, types optional & ignored. */
21443 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21444 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21445 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21446 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21447 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21448 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21449 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21450 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21451 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
21452 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
21453 /* Bitfield ops, untyped. */
21454 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21455 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21456 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21457 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21458 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21459 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21460 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
21461 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21462 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21463 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21464 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21465 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21466 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21467 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
21468 back to neon_dyadic_if_su. */
21469 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
21470 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
21471 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
21472 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
21473 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
21474 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
21475 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
21476 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
21477 /* Comparison. Type I8 I16 I32 F32. */
21478 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
21479 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
21480 /* As above, D registers only. */
21481 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
21482 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
21483 /* Int and float variants, signedness unimportant. */
21484 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
21485 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
21486 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
21487 /* Add/sub take types I8 I16 I32 I64 F32. */
21488 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
21489 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
21490 /* vtst takes sizes 8, 16, 32. */
21491 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
21492 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
21493 /* VMUL takes I8 I16 I32 F32 P8. */
21494 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
21495 /* VQD{R}MULH takes S16 S32. */
21496 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
21497 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
21498 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
21499 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
21500 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
21501 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
21502 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
21503 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
21504 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
21505 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
21506 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
21507 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
21508 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
21509 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
21510 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
21511 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
21512 /* ARM v8.1 extension. */
21513 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
21514 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
21515 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
21516 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
21518 /* Two address, int/float. Types S8 S16 S32 F32. */
21519 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
21520 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
21522 /* Data processing with two registers and a shift amount. */
21523 /* Right shifts, and variants with rounding.
21524 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
21525 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
21526 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
21527 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
21528 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
21529 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
21530 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
21531 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
21532 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
21533 /* Shift and insert. Sizes accepted 8 16 32 64. */
21534 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
21535 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
21536 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
21537 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
21538 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
21539 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
21540 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
21541 /* Right shift immediate, saturating & narrowing, with rounding variants.
21542 Types accepted S16 S32 S64 U16 U32 U64. */
21543 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
21544 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
21545 /* As above, unsigned. Types accepted S16 S32 S64. */
21546 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
21547 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
21548 /* Right shift narrowing. Types accepted I16 I32 I64. */
21549 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
21550 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
21551 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
21552 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
21553 /* CVT with optional immediate for fixed-point variant. */
21554 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
21556 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
21557 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
21559 /* Data processing, three registers of different lengths. */
21560 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
21561 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
21562 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21563 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21564 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21565 /* If not scalar, fall back to neon_dyadic_long.
21566 Vector types as above, scalar types S16 S32 U16 U32. */
21567 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
21568 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
21569 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
21570 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
21571 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
21572 /* Dyadic, narrowing insns. Types I16 I32 I64. */
21573 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21574 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21575 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21576 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21577 /* Saturating doubling multiplies. Types S16 S32. */
21578 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21579 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21580 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21581 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
21582 S16 S32 U16 U32. */
21583 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
21585 /* Extract. Size 8. */
21586 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
21587 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
21589 /* Two registers, miscellaneous. */
21590 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
21591 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
21592 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
21593 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
21594 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
21595 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
21596 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
21597 /* Vector replicate. Sizes 8 16 32. */
21598 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
21599 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
21600 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
21601 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
21602 /* VMOVN. Types I16 I32 I64. */
21603 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
21604 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21605 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
21606 /* VQMOVUN. Types S16 S32 S64. */
21607 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
21608 /* VZIP / VUZP. Sizes 8 16 32. */
21609 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
21610 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
21611 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
21612 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
21613 /* VQABS / VQNEG. Types S8 S16 S32. */
21614 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
21615 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
21616 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
21617 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
21618 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21619 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
21620 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
21621 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
21622 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
21623 /* Reciprocal estimates. Types U32 F16 F32. */
21624 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
21625 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
21626 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
21627 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
21628 /* VCLS. Types S8 S16 S32. */
21629 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
21630 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
21631 /* VCLZ. Types I8 I16 I32. */
21632 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
21633 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
21634 /* VCNT. Size 8. */
21635 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
21636 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
21637 /* Two address, untyped. */
21638 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
21639 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
21640 /* VTRN. Sizes 8 16 32. */
21641 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
21642 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
21644 /* Table lookup. Size 8. */
21645 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
21646 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
21648 #undef THUMB_VARIANT
21649 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21651 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21653 /* Neon element/structure load/store. */
21654 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21655 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21656 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21657 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21658 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21659 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21660 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21661 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21663 #undef THUMB_VARIANT
21664 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21666 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21667 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
21668 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21669 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21670 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21671 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21672 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21673 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21674 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21675 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21677 #undef THUMB_VARIANT
21678 #define THUMB_VARIANT & fpu_vfp_ext_v3
21680 #define ARM_VARIANT & fpu_vfp_ext_v3
21682 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
21683 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21684 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21685 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21686 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21687 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21688 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21689 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21690 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21693 #define ARM_VARIANT & fpu_vfp_ext_fma
21694 #undef THUMB_VARIANT
21695 #define THUMB_VARIANT & fpu_vfp_ext_fma
21696 /* Mnemonics shared by Neon and VFP. These are included in the
21697 VFP FMA variant; NEON and VFP FMA always includes the NEON
21698 FMA instructions. */
21699 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
21700 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
21701 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21702 the v form should always be used. */
21703 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21704 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21705 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21706 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21707 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21708 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21710 #undef THUMB_VARIANT
21712 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21714 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21715 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21716 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21717 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21718 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21719 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21720 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
21721 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
21724 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21726 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
21727 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
21728 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
21729 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
21730 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
21731 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
21732 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
21733 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
21734 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
21735 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21736 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21737 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21738 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21739 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21740 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21741 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21742 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21743 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21744 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
21745 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
21746 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21747 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21748 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21749 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21750 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21751 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21752 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
21753 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
21754 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
21755 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
21756 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
21757 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
21758 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
21759 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
21760 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21761 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21762 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21763 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21764 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21765 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21766 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21767 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21768 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21769 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21770 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21771 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21772 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
21773 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21774 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21775 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21776 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21777 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21778 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21779 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21780 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21781 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21782 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21783 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21784 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21785 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21786 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21787 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21788 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21789 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21790 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21791 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21792 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21793 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21794 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
21795 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
21796 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21797 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21798 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21799 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21800 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21801 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21802 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21803 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21804 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21805 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21806 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21807 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21808 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21809 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21810 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21811 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21812 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21813 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21814 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
21815 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21816 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21817 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21818 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21819 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21820 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21821 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21822 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21823 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21824 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21825 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21826 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21827 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21828 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21829 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21830 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21831 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21832 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21833 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21834 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21835 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21836 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
21837 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21838 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21839 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21840 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21841 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21842 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21843 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21844 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21845 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21846 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21847 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21848 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21849 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21850 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21851 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21852 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21853 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21854 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21855 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21856 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21857 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
21858 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
21859 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21860 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21861 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21862 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21863 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21864 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21865 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21866 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21867 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21868 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21869 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21870 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21871 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21872 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21873 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21874 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21875 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21876 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21877 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21878 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21879 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21880 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21881 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21882 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21883 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21884 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21885 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21886 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21887 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
21890 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21892 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
21893 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
21894 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
21895 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21896 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21897 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21898 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21899 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21900 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21901 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21902 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21903 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21904 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21905 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21906 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21907 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21908 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21909 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21910 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21911 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21912 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
21913 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21914 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21915 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21916 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21917 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21918 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21919 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21920 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21921 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21922 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21923 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21924 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21925 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21926 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21927 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21928 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21929 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21930 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21931 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21932 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21933 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21934 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21935 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21936 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21937 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21938 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21939 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21940 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21941 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21942 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21943 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21944 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21945 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21946 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21947 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21948 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21951 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21953 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21954 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21955 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21956 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21957 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21958 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21959 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21960 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21961 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
21962 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
21963 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
21964 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
21965 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
21966 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
21967 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
21968 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
21969 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
21970 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
21971 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
21972 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
21973 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
21974 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
21975 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
21976 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
21977 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
21978 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
21979 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
21980 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
21981 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
21982 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
21983 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
21984 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
21985 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
21986 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
21987 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
21988 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
21989 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
21990 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
21991 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
21992 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
21993 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
21994 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
21995 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
21996 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
21997 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
21998 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
21999 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
22000 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
22001 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
22002 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
22003 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
22004 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
22005 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
22006 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
22007 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22008 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22009 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22010 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22011 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22012 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22013 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
22014 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
22015 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
22016 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
22017 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22018 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22019 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22020 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22021 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22022 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22023 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22024 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22025 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22026 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22027 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22028 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22030 /* ARMv8.5-A instructions. */
22032 #define ARM_VARIANT & arm_ext_sb
22033 #undef THUMB_VARIANT
22034 #define THUMB_VARIANT & arm_ext_sb
22035 TUF("sb", 57ff070
, f3bf8f70
, 0, (), noargs
, noargs
),
22038 #define ARM_VARIANT & arm_ext_predres
22039 #undef THUMB_VARIANT
22040 #define THUMB_VARIANT & arm_ext_predres
22041 CE("cfprctx", e070f93
, 1, (RRnpc
), rd
),
22042 CE("dvprctx", e070fb3
, 1, (RRnpc
), rd
),
22043 CE("cpprctx", e070ff3
, 1, (RRnpc
), rd
),
22045 /* ARMv8-M instructions. */
22047 #define ARM_VARIANT NULL
22048 #undef THUMB_VARIANT
22049 #define THUMB_VARIANT & arm_ext_v8m
22050 ToU("sg", e97fe97f
, 0, (), noargs
),
22051 ToC("blxns", 4784, 1, (RRnpc
), t_blx
),
22052 ToC("bxns", 4704, 1, (RRnpc
), t_bx
),
22053 ToC("tt", e840f000
, 2, (RRnpc
, RRnpc
), tt
),
22054 ToC("ttt", e840f040
, 2, (RRnpc
, RRnpc
), tt
),
22055 ToC("tta", e840f080
, 2, (RRnpc
, RRnpc
), tt
),
22056 ToC("ttat", e840f0c0
, 2, (RRnpc
, RRnpc
), tt
),
22058 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
22059 instructions behave as nop if no VFP is present. */
22060 #undef THUMB_VARIANT
22061 #define THUMB_VARIANT & arm_ext_v8m_main
22062 ToC("vlldm", ec300a00
, 1, (RRnpc
), rn
),
22063 ToC("vlstm", ec200a00
, 1, (RRnpc
), rn
),
22065 /* Armv8.1-M Mainline instructions. */
22066 #undef THUMB_VARIANT
22067 #define THUMB_VARIANT & arm_ext_v8_1m_main
22068 toC("bf", _bf
, 2, (EXPs
, EXPs
), t_branch_future
),
22069 toU("bfcsel", _bfcsel
, 4, (EXPs
, EXPs
, EXPs
, COND
), t_branch_future
),
22070 toC("bfx", _bfx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22071 toC("bfl", _bfl
, 2, (EXPs
, EXPs
), t_branch_future
),
22072 toC("bflx", _bflx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22074 toU("dls", _dls
, 2, (LR
, RRnpcsp
), t_loloop
),
22075 toU("wls", _wls
, 3, (LR
, RRnpcsp
, EXP
), t_loloop
),
22076 toU("le", _le
, 2, (oLR
, EXP
), t_loloop
),
22078 ToC("clrm", e89f0000
, 1, (CLRMLST
), t_clrm
),
22079 ToC("vscclrm", ec9f0a00
, 1, (VRSDVLST
), t_vscclrm
)
22082 #undef THUMB_VARIANT
22114 /* MD interface: bits in the object file. */
22116 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
22117 for use in the a.out file, and stores them in the array pointed to by buf.
22118 This knows about the endian-ness of the target machine and does
22119 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
22120 2 (short) and 4 (long) Floating numbers are put out as a series of
22121 LITTLENUMS (shorts, here at least). */
22124 md_number_to_chars (char * buf
, valueT val
, int n
)
22126 if (target_big_endian
)
22127 number_to_chars_bigendian (buf
, val
, n
);
22129 number_to_chars_littleendian (buf
, val
, n
);
22133 md_chars_to_number (char * buf
, int n
)
22136 unsigned char * where
= (unsigned char *) buf
;
22138 if (target_big_endian
)
22143 result
|= (*where
++ & 255);
22151 result
|= (where
[n
] & 255);
22158 /* MD interface: Sections. */
22160 /* Calculate the maximum variable size (i.e., excluding fr_fix)
22161 that an rs_machine_dependent frag may reach. */
22164 arm_frag_max_var (fragS
*fragp
)
22166 /* We only use rs_machine_dependent for variable-size Thumb instructions,
22167 which are either THUMB_SIZE (2) or INSN_SIZE (4).
22169 Note that we generate relaxable instructions even for cases that don't
22170 really need it, like an immediate that's a trivial constant. So we're
22171 overestimating the instruction size for some of those cases. Rather
22172 than putting more intelligence here, it would probably be better to
22173 avoid generating a relaxation frag in the first place when it can be
22174 determined up front that a short instruction will suffice. */
22176 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
22180 /* Estimate the size of a frag before relaxing. Assume everything fits in
22184 md_estimate_size_before_relax (fragS
* fragp
,
22185 segT segtype ATTRIBUTE_UNUSED
)
22191 /* Convert a machine dependent frag. */
22194 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
22196 unsigned long insn
;
22197 unsigned long old_op
;
22205 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
22207 old_op
= bfd_get_16(abfd
, buf
);
22208 if (fragp
->fr_symbol
)
22210 exp
.X_op
= O_symbol
;
22211 exp
.X_add_symbol
= fragp
->fr_symbol
;
22215 exp
.X_op
= O_constant
;
22217 exp
.X_add_number
= fragp
->fr_offset
;
22218 opcode
= fragp
->fr_subtype
;
22221 case T_MNEM_ldr_pc
:
22222 case T_MNEM_ldr_pc2
:
22223 case T_MNEM_ldr_sp
:
22224 case T_MNEM_str_sp
:
22231 if (fragp
->fr_var
== 4)
22233 insn
= THUMB_OP32 (opcode
);
22234 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
22236 insn
|= (old_op
& 0x700) << 4;
22240 insn
|= (old_op
& 7) << 12;
22241 insn
|= (old_op
& 0x38) << 13;
22243 insn
|= 0x00000c00;
22244 put_thumb32_insn (buf
, insn
);
22245 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
22249 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
22251 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
22254 if (fragp
->fr_var
== 4)
22256 insn
= THUMB_OP32 (opcode
);
22257 insn
|= (old_op
& 0xf0) << 4;
22258 put_thumb32_insn (buf
, insn
);
22259 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
22263 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22264 exp
.X_add_number
-= 4;
22272 if (fragp
->fr_var
== 4)
22274 int r0off
= (opcode
== T_MNEM_mov
22275 || opcode
== T_MNEM_movs
) ? 0 : 8;
22276 insn
= THUMB_OP32 (opcode
);
22277 insn
= (insn
& 0xe1ffffff) | 0x10000000;
22278 insn
|= (old_op
& 0x700) << r0off
;
22279 put_thumb32_insn (buf
, insn
);
22280 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
22284 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
22289 if (fragp
->fr_var
== 4)
22291 insn
= THUMB_OP32(opcode
);
22292 put_thumb32_insn (buf
, insn
);
22293 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
22296 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
22300 if (fragp
->fr_var
== 4)
22302 insn
= THUMB_OP32(opcode
);
22303 insn
|= (old_op
& 0xf00) << 14;
22304 put_thumb32_insn (buf
, insn
);
22305 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
22308 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
22311 case T_MNEM_add_sp
:
22312 case T_MNEM_add_pc
:
22313 case T_MNEM_inc_sp
:
22314 case T_MNEM_dec_sp
:
22315 if (fragp
->fr_var
== 4)
22317 /* ??? Choose between add and addw. */
22318 insn
= THUMB_OP32 (opcode
);
22319 insn
|= (old_op
& 0xf0) << 4;
22320 put_thumb32_insn (buf
, insn
);
22321 if (opcode
== T_MNEM_add_pc
)
22322 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
22324 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
22327 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22335 if (fragp
->fr_var
== 4)
22337 insn
= THUMB_OP32 (opcode
);
22338 insn
|= (old_op
& 0xf0) << 4;
22339 insn
|= (old_op
& 0xf) << 16;
22340 put_thumb32_insn (buf
, insn
);
22341 if (insn
& (1 << 20))
22342 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
22344 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
22347 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22353 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
22354 (enum bfd_reloc_code_real
) reloc_type
);
22355 fixp
->fx_file
= fragp
->fr_file
;
22356 fixp
->fx_line
= fragp
->fr_line
;
22357 fragp
->fr_fix
+= fragp
->fr_var
;
22359 /* Set whether we use thumb-2 ISA based on final relaxation results. */
22360 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
22361 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
22362 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
22365 /* Return the size of a relaxable immediate operand instruction.
22366 SHIFT and SIZE specify the form of the allowable immediate. */
22368 relax_immediate (fragS
*fragp
, int size
, int shift
)
22374 /* ??? Should be able to do better than this. */
22375 if (fragp
->fr_symbol
)
22378 low
= (1 << shift
) - 1;
22379 mask
= (1 << (shift
+ size
)) - (1 << shift
);
22380 offset
= fragp
->fr_offset
;
22381 /* Force misaligned offsets to 32-bit variant. */
22384 if (offset
& ~mask
)
22389 /* Get the address of a symbol during relaxation. */
22391 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
22397 sym
= fragp
->fr_symbol
;
22398 sym_frag
= symbol_get_frag (sym
);
22399 know (S_GET_SEGMENT (sym
) != absolute_section
22400 || sym_frag
== &zero_address_frag
);
22401 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
22403 /* If frag has yet to be reached on this pass, assume it will
22404 move by STRETCH just as we did. If this is not so, it will
22405 be because some frag between grows, and that will force
22409 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
22413 /* Adjust stretch for any alignment frag. Note that if have
22414 been expanding the earlier code, the symbol may be
22415 defined in what appears to be an earlier frag. FIXME:
22416 This doesn't handle the fr_subtype field, which specifies
22417 a maximum number of bytes to skip when doing an
22419 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
22421 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
22424 stretch
= - ((- stretch
)
22425 & ~ ((1 << (int) f
->fr_offset
) - 1));
22427 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
22439 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
22442 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
22447 /* Assume worst case for symbols not known to be in the same section. */
22448 if (fragp
->fr_symbol
== NULL
22449 || !S_IS_DEFINED (fragp
->fr_symbol
)
22450 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
22451 || S_IS_WEAK (fragp
->fr_symbol
))
22454 val
= relaxed_symbol_addr (fragp
, stretch
);
22455 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
22456 addr
= (addr
+ 4) & ~3;
22457 /* Force misaligned targets to 32-bit variant. */
22461 if (val
< 0 || val
> 1020)
22466 /* Return the size of a relaxable add/sub immediate instruction. */
22468 relax_addsub (fragS
*fragp
, asection
*sec
)
22473 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
22474 op
= bfd_get_16(sec
->owner
, buf
);
22475 if ((op
& 0xf) == ((op
>> 4) & 0xf))
22476 return relax_immediate (fragp
, 8, 0);
22478 return relax_immediate (fragp
, 3, 0);
22481 /* Return TRUE iff the definition of symbol S could be pre-empted
22482 (overridden) at link or load time. */
22484 symbol_preemptible (symbolS
*s
)
22486 /* Weak symbols can always be pre-empted. */
22490 /* Non-global symbols cannot be pre-empted. */
22491 if (! S_IS_EXTERNAL (s
))
22495 /* In ELF, a global symbol can be marked protected, or private. In that
22496 case it can't be pre-empted (other definitions in the same link unit
22497 would violate the ODR). */
22498 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
22502 /* Other global symbols might be pre-empted. */
22506 /* Return the size of a relaxable branch instruction. BITS is the
22507 size of the offset field in the narrow instruction. */
22510 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
22516 /* Assume worst case for symbols not known to be in the same section. */
22517 if (!S_IS_DEFINED (fragp
->fr_symbol
)
22518 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
22519 || S_IS_WEAK (fragp
->fr_symbol
))
22523 /* A branch to a function in ARM state will require interworking. */
22524 if (S_IS_DEFINED (fragp
->fr_symbol
)
22525 && ARM_IS_FUNC (fragp
->fr_symbol
))
22529 if (symbol_preemptible (fragp
->fr_symbol
))
22532 val
= relaxed_symbol_addr (fragp
, stretch
);
22533 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
22536 /* Offset is a signed value *2 */
22538 if (val
>= limit
|| val
< -limit
)
22544 /* Relax a machine dependent frag. This returns the amount by which
22545 the current size of the frag should change. */
22548 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
22553 oldsize
= fragp
->fr_var
;
22554 switch (fragp
->fr_subtype
)
22556 case T_MNEM_ldr_pc2
:
22557 newsize
= relax_adr (fragp
, sec
, stretch
);
22559 case T_MNEM_ldr_pc
:
22560 case T_MNEM_ldr_sp
:
22561 case T_MNEM_str_sp
:
22562 newsize
= relax_immediate (fragp
, 8, 2);
22566 newsize
= relax_immediate (fragp
, 5, 2);
22570 newsize
= relax_immediate (fragp
, 5, 1);
22574 newsize
= relax_immediate (fragp
, 5, 0);
22577 newsize
= relax_adr (fragp
, sec
, stretch
);
22583 newsize
= relax_immediate (fragp
, 8, 0);
22586 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
22589 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
22591 case T_MNEM_add_sp
:
22592 case T_MNEM_add_pc
:
22593 newsize
= relax_immediate (fragp
, 8, 2);
22595 case T_MNEM_inc_sp
:
22596 case T_MNEM_dec_sp
:
22597 newsize
= relax_immediate (fragp
, 7, 2);
22603 newsize
= relax_addsub (fragp
, sec
);
22609 fragp
->fr_var
= newsize
;
22610 /* Freeze wide instructions that are at or before the same location as
22611 in the previous pass. This avoids infinite loops.
22612 Don't freeze them unconditionally because targets may be artificially
22613 misaligned by the expansion of preceding frags. */
22614 if (stretch
<= 0 && newsize
> 2)
22616 md_convert_frag (sec
->owner
, sec
, fragp
);
22620 return newsize
- oldsize
;
22623 /* Round up a section size to the appropriate boundary. */
22626 md_section_align (segT segment ATTRIBUTE_UNUSED
,
22632 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
22633 of an rs_align_code fragment. */
22636 arm_handle_align (fragS
* fragP
)
22638 static unsigned char const arm_noop
[2][2][4] =
22641 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
22642 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
22645 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
22646 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
22649 static unsigned char const thumb_noop
[2][2][2] =
22652 {0xc0, 0x46}, /* LE */
22653 {0x46, 0xc0}, /* BE */
22656 {0x00, 0xbf}, /* LE */
22657 {0xbf, 0x00} /* BE */
22660 static unsigned char const wide_thumb_noop
[2][4] =
22661 { /* Wide Thumb-2 */
22662 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
22663 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
22666 unsigned bytes
, fix
, noop_size
;
22668 const unsigned char * noop
;
22669 const unsigned char *narrow_noop
= NULL
;
22674 if (fragP
->fr_type
!= rs_align_code
)
22677 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
22678 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
22681 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
22682 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
22684 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
22686 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
22688 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
22689 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
22691 narrow_noop
= thumb_noop
[1][target_big_endian
];
22692 noop
= wide_thumb_noop
[target_big_endian
];
22695 noop
= thumb_noop
[0][target_big_endian
];
22703 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
22704 ? selected_cpu
: arm_arch_none
,
22706 [target_big_endian
];
22713 fragP
->fr_var
= noop_size
;
22715 if (bytes
& (noop_size
- 1))
22717 fix
= bytes
& (noop_size
- 1);
22719 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
22721 memset (p
, 0, fix
);
22728 if (bytes
& noop_size
)
22730 /* Insert a narrow noop. */
22731 memcpy (p
, narrow_noop
, noop_size
);
22733 bytes
-= noop_size
;
22737 /* Use wide noops for the remainder */
22741 while (bytes
>= noop_size
)
22743 memcpy (p
, noop
, noop_size
);
22745 bytes
-= noop_size
;
22749 fragP
->fr_fix
+= fix
;
22752 /* Called from md_do_align. Used to create an alignment
22753 frag in a code section. */
22756 arm_frag_align_code (int n
, int max
)
22760 /* We assume that there will never be a requirement
22761 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22762 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
22767 _("alignments greater than %d bytes not supported in .text sections."),
22768 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
22769 as_fatal ("%s", err_msg
);
22772 p
= frag_var (rs_align_code
,
22773 MAX_MEM_FOR_RS_ALIGN_CODE
,
22775 (relax_substateT
) max
,
22782 /* Perform target specific initialisation of a frag.
22783 Note - despite the name this initialisation is not done when the frag
22784 is created, but only when its type is assigned. A frag can be created
22785 and used a long time before its type is set, so beware of assuming that
22786 this initialisation is performed first. */
22790 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
22792 /* Record whether this frag is in an ARM or a THUMB area. */
22793 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
22796 #else /* OBJ_ELF is defined. */
22798 arm_init_frag (fragS
* fragP
, int max_chars
)
22800 bfd_boolean frag_thumb_mode
;
22802 /* If the current ARM vs THUMB mode has not already
22803 been recorded into this frag then do so now. */
22804 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
22805 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
22807 /* PR 21809: Do not set a mapping state for debug sections
22808 - it just confuses other tools. */
22809 if (bfd_get_section_flags (NULL
, now_seg
) & SEC_DEBUGGING
)
22812 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
22814 /* Record a mapping symbol for alignment frags. We will delete this
22815 later if the alignment ends up empty. */
22816 switch (fragP
->fr_type
)
22819 case rs_align_test
:
22821 mapping_state_2 (MAP_DATA
, max_chars
);
22823 case rs_align_code
:
22824 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
22831 /* When we change sections we need to issue a new mapping symbol. */
22834 arm_elf_change_section (void)
22836 /* Link an unlinked unwind index table section to the .text section. */
22837 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
22838 && elf_linked_to_section (now_seg
) == NULL
)
22839 elf_linked_to_section (now_seg
) = text_section
;
22843 arm_elf_section_type (const char * str
, size_t len
)
22845 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
22846 return SHT_ARM_EXIDX
;
22851 /* Code to deal with unwinding tables. */
22853 static void add_unwind_adjustsp (offsetT
);
22855 /* Generate any deferred unwind frame offset. */
22858 flush_pending_unwind (void)
22862 offset
= unwind
.pending_offset
;
22863 unwind
.pending_offset
= 0;
22865 add_unwind_adjustsp (offset
);
22868 /* Add an opcode to this list for this function. Two-byte opcodes should
22869 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22873 add_unwind_opcode (valueT op
, int length
)
22875 /* Add any deferred stack adjustment. */
22876 if (unwind
.pending_offset
)
22877 flush_pending_unwind ();
22879 unwind
.sp_restored
= 0;
22881 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
22883 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
22884 if (unwind
.opcodes
)
22885 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
22886 unwind
.opcode_alloc
);
22888 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
22893 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
22895 unwind
.opcode_count
++;
22899 /* Add unwind opcodes to adjust the stack pointer. */
22902 add_unwind_adjustsp (offsetT offset
)
22906 if (offset
> 0x200)
22908 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
22913 /* Long form: 0xb2, uleb128. */
22914 /* This might not fit in a word so add the individual bytes,
22915 remembering the list is built in reverse order. */
22916 o
= (valueT
) ((offset
- 0x204) >> 2);
22918 add_unwind_opcode (0, 1);
22920 /* Calculate the uleb128 encoding of the offset. */
22924 bytes
[n
] = o
& 0x7f;
22930 /* Add the insn. */
22932 add_unwind_opcode (bytes
[n
- 1], 1);
22933 add_unwind_opcode (0xb2, 1);
22935 else if (offset
> 0x100)
22937 /* Two short opcodes. */
22938 add_unwind_opcode (0x3f, 1);
22939 op
= (offset
- 0x104) >> 2;
22940 add_unwind_opcode (op
, 1);
22942 else if (offset
> 0)
22944 /* Short opcode. */
22945 op
= (offset
- 4) >> 2;
22946 add_unwind_opcode (op
, 1);
22948 else if (offset
< 0)
22951 while (offset
> 0x100)
22953 add_unwind_opcode (0x7f, 1);
22956 op
= ((offset
- 4) >> 2) | 0x40;
22957 add_unwind_opcode (op
, 1);
22961 /* Finish the list of unwind opcodes for this function. */
22964 finish_unwind_opcodes (void)
22968 if (unwind
.fp_used
)
22970 /* Adjust sp as necessary. */
22971 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
22972 flush_pending_unwind ();
22974 /* After restoring sp from the frame pointer. */
22975 op
= 0x90 | unwind
.fp_reg
;
22976 add_unwind_opcode (op
, 1);
22979 flush_pending_unwind ();
22983 /* Start an exception table entry. If idx is nonzero this is an index table
22987 start_unwind_section (const segT text_seg
, int idx
)
22989 const char * text_name
;
22990 const char * prefix
;
22991 const char * prefix_once
;
22992 const char * group_name
;
23000 prefix
= ELF_STRING_ARM_unwind
;
23001 prefix_once
= ELF_STRING_ARM_unwind_once
;
23002 type
= SHT_ARM_EXIDX
;
23006 prefix
= ELF_STRING_ARM_unwind_info
;
23007 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
23008 type
= SHT_PROGBITS
;
23011 text_name
= segment_name (text_seg
);
23012 if (streq (text_name
, ".text"))
23015 if (strncmp (text_name
, ".gnu.linkonce.t.",
23016 strlen (".gnu.linkonce.t.")) == 0)
23018 prefix
= prefix_once
;
23019 text_name
+= strlen (".gnu.linkonce.t.");
23022 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
23028 /* Handle COMDAT group. */
23029 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
23031 group_name
= elf_group_name (text_seg
);
23032 if (group_name
== NULL
)
23034 as_bad (_("Group section `%s' has no group signature"),
23035 segment_name (text_seg
));
23036 ignore_rest_of_line ();
23039 flags
|= SHF_GROUP
;
23043 obj_elf_change_section (sec_name
, type
, 0, flags
, 0, group_name
,
23046 /* Set the section link for index tables. */
23048 elf_linked_to_section (now_seg
) = text_seg
;
23052 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
23053 personality routine data. Returns zero, or the index table value for
23054 an inline entry. */
23057 create_unwind_entry (int have_data
)
23062 /* The current word of data. */
23064 /* The number of bytes left in this word. */
23067 finish_unwind_opcodes ();
23069 /* Remember the current text section. */
23070 unwind
.saved_seg
= now_seg
;
23071 unwind
.saved_subseg
= now_subseg
;
23073 start_unwind_section (now_seg
, 0);
23075 if (unwind
.personality_routine
== NULL
)
23077 if (unwind
.personality_index
== -2)
23080 as_bad (_("handlerdata in cantunwind frame"));
23081 return 1; /* EXIDX_CANTUNWIND. */
23084 /* Use a default personality routine if none is specified. */
23085 if (unwind
.personality_index
== -1)
23087 if (unwind
.opcode_count
> 3)
23088 unwind
.personality_index
= 1;
23090 unwind
.personality_index
= 0;
23093 /* Space for the personality routine entry. */
23094 if (unwind
.personality_index
== 0)
23096 if (unwind
.opcode_count
> 3)
23097 as_bad (_("too many unwind opcodes for personality routine 0"));
23101 /* All the data is inline in the index table. */
23104 while (unwind
.opcode_count
> 0)
23106 unwind
.opcode_count
--;
23107 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
23111 /* Pad with "finish" opcodes. */
23113 data
= (data
<< 8) | 0xb0;
23120 /* We get two opcodes "free" in the first word. */
23121 size
= unwind
.opcode_count
- 2;
23125 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
23126 if (unwind
.personality_index
!= -1)
23128 as_bad (_("attempt to recreate an unwind entry"));
23132 /* An extra byte is required for the opcode count. */
23133 size
= unwind
.opcode_count
+ 1;
23136 size
= (size
+ 3) >> 2;
23138 as_bad (_("too many unwind opcodes"));
23140 frag_align (2, 0, 0);
23141 record_alignment (now_seg
, 2);
23142 unwind
.table_entry
= expr_build_dot ();
23144 /* Allocate the table entry. */
23145 ptr
= frag_more ((size
<< 2) + 4);
23146 /* PR 13449: Zero the table entries in case some of them are not used. */
23147 memset (ptr
, 0, (size
<< 2) + 4);
23148 where
= frag_now_fix () - ((size
<< 2) + 4);
23150 switch (unwind
.personality_index
)
23153 /* ??? Should this be a PLT generating relocation? */
23154 /* Custom personality routine. */
23155 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
23156 BFD_RELOC_ARM_PREL31
);
23161 /* Set the first byte to the number of additional words. */
23162 data
= size
> 0 ? size
- 1 : 0;
23166 /* ABI defined personality routines. */
23168 /* Three opcodes bytes are packed into the first word. */
23175 /* The size and first two opcode bytes go in the first word. */
23176 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
23181 /* Should never happen. */
23185 /* Pack the opcodes into words (MSB first), reversing the list at the same
23187 while (unwind
.opcode_count
> 0)
23191 md_number_to_chars (ptr
, data
, 4);
23196 unwind
.opcode_count
--;
23198 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
23201 /* Finish off the last word. */
23204 /* Pad with "finish" opcodes. */
23206 data
= (data
<< 8) | 0xb0;
23208 md_number_to_chars (ptr
, data
, 4);
23213 /* Add an empty descriptor if there is no user-specified data. */
23214 ptr
= frag_more (4);
23215 md_number_to_chars (ptr
, 0, 4);
23222 /* Initialize the DWARF-2 unwind information for this procedure. */
23225 tc_arm_frame_initial_instructions (void)
23227 cfi_add_CFA_def_cfa (REG_SP
, 0);
23229 #endif /* OBJ_ELF */
23231 /* Convert REGNAME to a DWARF-2 register number. */
23234 tc_arm_regname_to_dw2regnum (char *regname
)
23236 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
23240 /* PR 16694: Allow VFP registers as well. */
23241 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
23245 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
23254 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
23258 exp
.X_op
= O_secrel
;
23259 exp
.X_add_symbol
= symbol
;
23260 exp
.X_add_number
= 0;
23261 emit_expr (&exp
, size
);
23265 /* MD interface: Symbol and relocation handling. */
23267 /* Return the address within the segment that a PC-relative fixup is
23268 relative to. For ARM, PC-relative fixups applied to instructions
23269 are generally relative to the location of the fixup plus 8 bytes.
23270 Thumb branches are offset by 4, and Thumb loads relative to PC
23271 require special handling. */
23274 md_pcrel_from_section (fixS
* fixP
, segT seg
)
23276 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23278 /* If this is pc-relative and we are going to emit a relocation
23279 then we just want to put out any pipeline compensation that the linker
23280 will need. Otherwise we want to use the calculated base.
23281 For WinCE we skip the bias for externals as well, since this
23282 is how the MS ARM-CE assembler behaves and we want to be compatible. */
23284 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23285 || (arm_force_relocation (fixP
)
23287 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
23293 switch (fixP
->fx_r_type
)
23295 /* PC relative addressing on the Thumb is slightly odd as the
23296 bottom two bits of the PC are forced to zero for the
23297 calculation. This happens *after* application of the
23298 pipeline offset. However, Thumb adrl already adjusts for
23299 this, so we need not do it again. */
23300 case BFD_RELOC_ARM_THUMB_ADD
:
23303 case BFD_RELOC_ARM_THUMB_OFFSET
:
23304 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
23305 case BFD_RELOC_ARM_T32_ADD_PC12
:
23306 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23307 return (base
+ 4) & ~3;
23309 /* Thumb branches are simply offset by +4. */
23310 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
23311 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
23312 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
23313 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
23314 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23315 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23316 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
23317 case BFD_RELOC_ARM_THUMB_BF17
:
23318 case BFD_RELOC_ARM_THUMB_BF19
:
23319 case BFD_RELOC_ARM_THUMB_BF13
:
23320 case BFD_RELOC_ARM_THUMB_LOOP12
:
23323 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23325 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23326 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23327 && ARM_IS_FUNC (fixP
->fx_addsy
)
23328 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23329 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23332 /* BLX is like branches above, but forces the low two bits of PC to
23334 case BFD_RELOC_THUMB_PCREL_BLX
:
23336 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23337 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23338 && THUMB_IS_FUNC (fixP
->fx_addsy
)
23339 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23340 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23341 return (base
+ 4) & ~3;
23343 /* ARM mode branches are offset by +8. However, the Windows CE
23344 loader expects the relocation not to take this into account. */
23345 case BFD_RELOC_ARM_PCREL_BLX
:
23347 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23348 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23349 && ARM_IS_FUNC (fixP
->fx_addsy
)
23350 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23351 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23354 case BFD_RELOC_ARM_PCREL_CALL
:
23356 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23357 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23358 && THUMB_IS_FUNC (fixP
->fx_addsy
)
23359 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23360 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23363 case BFD_RELOC_ARM_PCREL_BRANCH
:
23364 case BFD_RELOC_ARM_PCREL_JUMP
:
23365 case BFD_RELOC_ARM_PLT32
:
23367 /* When handling fixups immediately, because we have already
23368 discovered the value of a symbol, or the address of the frag involved
23369 we must account for the offset by +8, as the OS loader will never see the reloc.
23370 see fixup_segment() in write.c
23371 The S_IS_EXTERNAL test handles the case of global symbols.
23372 Those need the calculated base, not just the pipe compensation the linker will need. */
23374 && fixP
->fx_addsy
!= NULL
23375 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23376 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
23384 /* ARM mode loads relative to PC are also offset by +8. Unlike
23385 branches, the Windows CE loader *does* expect the relocation
23386 to take this into account. */
23387 case BFD_RELOC_ARM_OFFSET_IMM
:
23388 case BFD_RELOC_ARM_OFFSET_IMM8
:
23389 case BFD_RELOC_ARM_HWLITERAL
:
23390 case BFD_RELOC_ARM_LITERAL
:
23391 case BFD_RELOC_ARM_CP_OFF_IMM
:
23395 /* Other PC-relative relocations are un-offset. */
23401 static bfd_boolean flag_warn_syms
= TRUE
;
23404 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
23406 /* PR 18347 - Warn if the user attempts to create a symbol with the same
23407 name as an ARM instruction. Whilst strictly speaking it is allowed, it
23408 does mean that the resulting code might be very confusing to the reader.
23409 Also this warning can be triggered if the user omits an operand before
23410 an immediate address, eg:
23414 GAS treats this as an assignment of the value of the symbol foo to a
23415 symbol LDR, and so (without this code) it will not issue any kind of
23416 warning or error message.
23418 Note - ARM instructions are case-insensitive but the strings in the hash
23419 table are all stored in lower case, so we must first ensure that name is
23421 if (flag_warn_syms
&& arm_ops_hsh
)
23423 char * nbuf
= strdup (name
);
23426 for (p
= nbuf
; *p
; p
++)
23428 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
23430 static struct hash_control
* already_warned
= NULL
;
23432 if (already_warned
== NULL
)
23433 already_warned
= hash_new ();
23434 /* Only warn about the symbol once. To keep the code
23435 simple we let hash_insert do the lookup for us. */
23436 if (hash_insert (already_warned
, nbuf
, NULL
) == NULL
)
23437 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
23446 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
23447 Otherwise we have no need to default values of symbols. */
23450 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
23453 if (name
[0] == '_' && name
[1] == 'G'
23454 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
23458 if (symbol_find (name
))
23459 as_bad (_("GOT already in the symbol table"));
23461 GOT_symbol
= symbol_new (name
, undefined_section
,
23462 (valueT
) 0, & zero_address_frag
);
23472 /* Subroutine of md_apply_fix. Check to see if an immediate can be
23473 computed as two separate immediate values, added together. We
23474 already know that this value cannot be computed by just one ARM
23477 static unsigned int
23478 validate_immediate_twopart (unsigned int val
,
23479 unsigned int * highpart
)
23484 for (i
= 0; i
< 32; i
+= 2)
23485 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
23491 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
23493 else if (a
& 0xff0000)
23495 if (a
& 0xff000000)
23497 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
23501 gas_assert (a
& 0xff000000);
23502 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
23505 return (a
& 0xff) | (i
<< 7);
23512 validate_offset_imm (unsigned int val
, int hwse
)
23514 if ((hwse
&& val
> 255) || val
> 4095)
23519 /* Subroutine of md_apply_fix. Do those data_ops which can take a
23520 negative immediate constant by altering the instruction. A bit of
23525 by inverting the second operand, and
23528 by negating the second operand. */
/* NOTE(review): extraction damage -- the `switch (op)` header, braces,
   `break`s, declarations of `op`/`new_inst`/`value`, and the selection of
   `value` from `negated`/`inverted` per case are missing from this view.
   Code kept byte-for-byte; comments only added.  On success the
   instruction's opcode field is rewritten in place and the re-encoded
   immediate is returned; FAIL is returned when no rewrite applies.  */
23531 negate_data_op (unsigned long * instruction
,
23532 unsigned long value
)
23535 unsigned long negated
, inverted
;
/* Pre-compute both candidate encodings: the arithmetic negation (for
   ADD<->SUB style swaps) and the bitwise inversion (MOV<->MVN style).  */
23537 negated
= encode_arm_immediate (-value
);
23538 inverted
= encode_arm_immediate (~value
);
/* Extract the 4-bit data-processing opcode from the instruction.  */
23540 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
23543 /* First negates. */
23544 case OPCODE_SUB
: /* ADD <-> SUB */
23545 new_inst
= OPCODE_ADD
;
23550 new_inst
= OPCODE_SUB
;
23554 case OPCODE_CMP
: /* CMP <-> CMN */
23555 new_inst
= OPCODE_CMN
;
23560 new_inst
= OPCODE_CMP
;
23564 /* Now Inverted ops. */
23565 case OPCODE_MOV
: /* MOV <-> MVN */
23566 new_inst
= OPCODE_MVN
;
23571 new_inst
= OPCODE_MOV
;
23575 case OPCODE_AND
: /* AND <-> BIC */
23576 new_inst
= OPCODE_BIC
;
23581 new_inst
= OPCODE_AND
;
23585 case OPCODE_ADC
: /* ADC <-> SBC */
23586 new_inst
= OPCODE_SBC
;
23591 new_inst
= OPCODE_ADC
;
23595 /* We cannot do anything. */
/* If the substitute immediate also failed to encode, give up.  */
23600 if (value
== (unsigned) FAIL
)
/* Splice the replacement opcode into the instruction in place.  */
23603 *instruction
&= OPCODE_MASK
;
23604 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
23608 /* Like negate_data_op, but for Thumb-2. */
/* NOTE(review): extraction damage -- the `switch (op)` header, braces,
   `break`s, the per-case choice between `negated` and `inverted`, the
   special handling around TST/rd, and the return statement are missing
   from this view.  Code kept byte-for-byte; comments only added.  */
23610 static unsigned int
23611 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
23615 unsigned int negated
, inverted
;
/* Pre-compute both candidate Thumb-2 modified-immediate encodings.  */
23617 negated
= encode_thumb32_immediate (-value
);
23618 inverted
= encode_thumb32_immediate (~value
);
/* Destination register lives in bits [11:8] of the T32 encoding.  */
23620 rd
= (*instruction
>> 8) & 0xf;
23621 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
23624 /* ADD <-> SUB. Includes CMP <-> CMN. */
23625 case T2_OPCODE_SUB
:
23626 new_inst
= T2_OPCODE_ADD
;
23630 case T2_OPCODE_ADD
:
23631 new_inst
= T2_OPCODE_SUB
;
23635 /* ORR <-> ORN. Includes MOV <-> MVN. */
23636 case T2_OPCODE_ORR
:
23637 new_inst
= T2_OPCODE_ORN
;
23641 case T2_OPCODE_ORN
:
23642 new_inst
= T2_OPCODE_ORR
;
23646 /* AND <-> BIC. TST has no inverted equivalent. */
23647 case T2_OPCODE_AND
:
23648 new_inst
= T2_OPCODE_BIC
;
23655 case T2_OPCODE_BIC
:
23656 new_inst
= T2_OPCODE_AND
;
/* ADC <-> SBC use the arithmetically negated value.  */
23661 case T2_OPCODE_ADC
:
23662 new_inst
= T2_OPCODE_SBC
;
23666 case T2_OPCODE_SBC
:
23667 new_inst
= T2_OPCODE_ADC
;
23671 /* We cannot do anything. */
/* Substitute immediate also unencodable: give up.  */
23676 if (value
== (unsigned int)FAIL
)
/* Rewrite the opcode field of the instruction in place.  */
23679 *instruction
&= T2_OPCODE_MASK
;
23680 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
23684 /* Read a 32-bit thumb instruction from buf. */
/* NOTE(review): extraction damage -- braces and the `return insn;` line
   are missing from this view.  Code kept byte-for-byte; comments only
   added.  A 32-bit Thumb instruction is stored as two consecutive
   16-bit halfwords; the first halfword forms the high 16 bits.  */
23686 static unsigned long
23687 get_thumb32_insn (char * buf
)
23689 unsigned long insn
;
23690 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
23691 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23696 /* We usually want to set the low bit on the address of thumb function
23697 symbols. In particular .word foo - . should have the low bit set.
23698 Generic code tries to fold the difference of two symbols to
23699 a constant. Prevent this and force a relocation when the first symbols
23700 is a thumb function. */
/* NOTE(review): extraction damage -- braces, the `return TRUE;` /
   `return FALSE;` lines and the return type are missing from this view.
   Code kept byte-for-byte; comments only added.  */
23703 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
/* Only intercept symbol-minus-symbol where the left symbol is a Thumb
   function; all other expressions fall through to generic folding.  */
23705 if (op
== O_subtract
23706 && l
->X_op
== O_symbol
23707 && r
->X_op
== O_symbol
23708 && THUMB_IS_FUNC (l
->X_add_symbol
))
/* Rebuild the expression as an explicit symbol difference so a
   relocation is emitted instead of a folded constant.  */
23710 l
->X_op
= O_subtract
;
23711 l
->X_op_symbol
= r
->X_add_symbol
;
/* Fold the constant parts of the two operands together.  */
23712 l
->X_add_number
-= r
->X_add_number
;
23716 /* Process as normal. */
23720 /* Encode Thumb2 unconditional branches and calls. The encoding
23721 for the 2 are identical for the immediate values. */
/* NOTE(review): extraction damage -- the function body's braces and the
   declarations of `newval`/`newval2` are missing from this view.  Code
   kept byte-for-byte; comments only added.
   Splits a signed 25-bit branch offset into the T32 B/BL fields:
   S, I1, I2, imm10 (hi) and imm11 (lo), where the encoded J1/J2 bits
   are I1^S and I2^S inverted via the final XOR with T2I1I2MASK.  */
23724 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
/* Mask of the J1 (bit 13) and J2 (bit 11) positions in halfword 2.  */
23726 #define T2I1I2MASK ((1 << 13) | (1 << 11))
23729 addressT S
, I1
, I2
, lo
, hi
;
/* Decompose the offset: sign bit, the two intermediate bits, the
   10-bit high immediate and the 11-bit low immediate (offset is
   halfword-aligned, hence the >> 1).  */
23731 S
= (value
>> 24) & 0x01;
23732 I1
= (value
>> 23) & 0x01;
23733 I2
= (value
>> 22) & 0x01;
23734 hi
= (value
>> 12) & 0x3ff;
23735 lo
= (value
>> 1) & 0x7ff;
/* Read both 16-bit halfwords of the existing instruction.  */
23736 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23737 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
/* Merge S and imm10 into halfword 1.  */
23738 newval
|= (S
<< 10) | hi
;
/* Clear J1/J2, then write (I1^S), (I2^S) and imm11; the trailing XOR
   with T2I1I2MASK applies the architectural inversion of J1/J2.  */
23739 newval2
&= ~T2I1I2MASK
;
23740 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
/* Write both halfwords back.  */
23741 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23742 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
23746 md_apply_fix (fixS
* fixP
,
23750 offsetT value
= * valP
;
23752 unsigned int newimm
;
23753 unsigned long temp
;
23755 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
23757 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
23759 /* Note whether this will delete the relocation. */
23761 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
23764 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23765 consistency with the behaviour on 32-bit hosts. Remember value
23767 value
&= 0xffffffff;
23768 value
^= 0x80000000;
23769 value
-= 0x80000000;
23772 fixP
->fx_addnumber
= value
;
23774 /* Same treatment for fixP->fx_offset. */
23775 fixP
->fx_offset
&= 0xffffffff;
23776 fixP
->fx_offset
^= 0x80000000;
23777 fixP
->fx_offset
-= 0x80000000;
23779 switch (fixP
->fx_r_type
)
23781 case BFD_RELOC_NONE
:
23782 /* This will need to go in the object file. */
23786 case BFD_RELOC_ARM_IMMEDIATE
:
23787 /* We claim that this fixup has been processed here,
23788 even if in fact we generate an error because we do
23789 not have a reloc for it, so tc_gen_reloc will reject it. */
23792 if (fixP
->fx_addsy
)
23794 const char *msg
= 0;
23796 if (! S_IS_DEFINED (fixP
->fx_addsy
))
23797 msg
= _("undefined symbol %s used as an immediate value");
23798 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23799 msg
= _("symbol %s is in a different section");
23800 else if (S_IS_WEAK (fixP
->fx_addsy
))
23801 msg
= _("symbol %s is weak and may be overridden later");
23805 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23806 msg
, S_GET_NAME (fixP
->fx_addsy
));
23811 temp
= md_chars_to_number (buf
, INSN_SIZE
);
23813 /* If the offset is negative, we should use encoding A2 for ADR. */
23814 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
23815 newimm
= negate_data_op (&temp
, value
);
23818 newimm
= encode_arm_immediate (value
);
23820 /* If the instruction will fail, see if we can fix things up by
23821 changing the opcode. */
23822 if (newimm
== (unsigned int) FAIL
)
23823 newimm
= negate_data_op (&temp
, value
);
23824 /* MOV accepts both ARM modified immediate (A1 encoding) and
23825 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23826 When disassembling, MOV is preferred when there is no encoding
23828 if (newimm
== (unsigned int) FAIL
23829 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
23830 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
23831 && !((temp
>> SBIT_SHIFT
) & 0x1)
23832 && value
>= 0 && value
<= 0xffff)
23834 /* Clear bits[23:20] to change encoding from A1 to A2. */
23835 temp
&= 0xff0fffff;
23836 /* Encoding high 4bits imm. Code below will encode the remaining
23838 temp
|= (value
& 0x0000f000) << 4;
23839 newimm
= value
& 0x00000fff;
23843 if (newimm
== (unsigned int) FAIL
)
23845 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23846 _("invalid constant (%lx) after fixup"),
23847 (unsigned long) value
);
23851 newimm
|= (temp
& 0xfffff000);
23852 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
23855 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
23857 unsigned int highpart
= 0;
23858 unsigned int newinsn
= 0xe1a00000; /* nop. */
23860 if (fixP
->fx_addsy
)
23862 const char *msg
= 0;
23864 if (! S_IS_DEFINED (fixP
->fx_addsy
))
23865 msg
= _("undefined symbol %s used as an immediate value");
23866 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23867 msg
= _("symbol %s is in a different section");
23868 else if (S_IS_WEAK (fixP
->fx_addsy
))
23869 msg
= _("symbol %s is weak and may be overridden later");
23873 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23874 msg
, S_GET_NAME (fixP
->fx_addsy
));
23879 newimm
= encode_arm_immediate (value
);
23880 temp
= md_chars_to_number (buf
, INSN_SIZE
);
23882 /* If the instruction will fail, see if we can fix things up by
23883 changing the opcode. */
23884 if (newimm
== (unsigned int) FAIL
23885 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
23887 /* No ? OK - try using two ADD instructions to generate
23889 newimm
= validate_immediate_twopart (value
, & highpart
);
23891 /* Yes - then make sure that the second instruction is
23893 if (newimm
!= (unsigned int) FAIL
)
23895 /* Still No ? Try using a negated value. */
23896 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
23897 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
23898 /* Otherwise - give up. */
23901 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23902 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23907 /* Replace the first operand in the 2nd instruction (which
23908 is the PC) with the destination register. We have
23909 already added in the PC in the first instruction and we
23910 do not want to do it again. */
23911 newinsn
&= ~ 0xf0000;
23912 newinsn
|= ((newinsn
& 0x0f000) << 4);
23915 newimm
|= (temp
& 0xfffff000);
23916 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
23918 highpart
|= (newinsn
& 0xfffff000);
23919 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
23923 case BFD_RELOC_ARM_OFFSET_IMM
:
23924 if (!fixP
->fx_done
&& seg
->use_rela_p
)
23926 /* Fall through. */
23928 case BFD_RELOC_ARM_LITERAL
:
23934 if (validate_offset_imm (value
, 0) == FAIL
)
23936 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
23937 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23938 _("invalid literal constant: pool needs to be closer"));
23940 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23941 _("bad immediate value for offset (%ld)"),
23946 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23948 newval
&= 0xfffff000;
23951 newval
&= 0xff7ff000;
23952 newval
|= value
| (sign
? INDEX_UP
: 0);
23954 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23957 case BFD_RELOC_ARM_OFFSET_IMM8
:
23958 case BFD_RELOC_ARM_HWLITERAL
:
23964 if (validate_offset_imm (value
, 1) == FAIL
)
23966 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
23967 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23968 _("invalid literal constant: pool needs to be closer"));
23970 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23971 _("bad immediate value for 8-bit offset (%ld)"),
23976 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23978 newval
&= 0xfffff0f0;
23981 newval
&= 0xff7ff0f0;
23982 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
23984 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23987 case BFD_RELOC_ARM_T32_OFFSET_U8
:
23988 if (value
< 0 || value
> 1020 || value
% 4 != 0)
23989 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23990 _("bad immediate value for offset (%ld)"), (long) value
);
23993 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
23995 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
23998 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
23999 /* This is a complicated relocation used for all varieties of Thumb32
24000 load/store instruction with immediate offset:
24002 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
24003 *4, optional writeback(W)
24004 (doubleword load/store)
24006 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
24007 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
24008 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
24009 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
24010 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
24012 Uppercase letters indicate bits that are already encoded at
24013 this point. Lowercase letters are our problem. For the
24014 second block of instructions, the secondary opcode nybble
24015 (bits 8..11) is present, and bit 23 is zero, even if this is
24016 a PC-relative operation. */
24017 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24019 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
24021 if ((newval
& 0xf0000000) == 0xe0000000)
24023 /* Doubleword load/store: 8-bit offset, scaled by 4. */
24025 newval
|= (1 << 23);
24028 if (value
% 4 != 0)
24030 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24031 _("offset not a multiple of 4"));
24037 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24038 _("offset out of range"));
24043 else if ((newval
& 0x000f0000) == 0x000f0000)
24045 /* PC-relative, 12-bit offset. */
24047 newval
|= (1 << 23);
24052 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24053 _("offset out of range"));
24058 else if ((newval
& 0x00000100) == 0x00000100)
24060 /* Writeback: 8-bit, +/- offset. */
24062 newval
|= (1 << 9);
24067 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24068 _("offset out of range"));
24073 else if ((newval
& 0x00000f00) == 0x00000e00)
24075 /* T-instruction: positive 8-bit offset. */
24076 if (value
< 0 || value
> 0xff)
24078 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24079 _("offset out of range"));
24087 /* Positive 12-bit or negative 8-bit offset. */
24091 newval
|= (1 << 23);
24101 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24102 _("offset out of range"));
24109 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
24110 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
24113 case BFD_RELOC_ARM_SHIFT_IMM
:
24114 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24115 if (((unsigned long) value
) > 32
24117 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
24119 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24120 _("shift expression is too large"));
24125 /* Shifts of zero must be done as lsl. */
24127 else if (value
== 32)
24129 newval
&= 0xfffff07f;
24130 newval
|= (value
& 0x1f) << 7;
24131 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24134 case BFD_RELOC_ARM_T32_IMMEDIATE
:
24135 case BFD_RELOC_ARM_T32_ADD_IMM
:
24136 case BFD_RELOC_ARM_T32_IMM12
:
24137 case BFD_RELOC_ARM_T32_ADD_PC12
:
24138 /* We claim that this fixup has been processed here,
24139 even if in fact we generate an error because we do
24140 not have a reloc for it, so tc_gen_reloc will reject it. */
24144 && ! S_IS_DEFINED (fixP
->fx_addsy
))
24146 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24147 _("undefined symbol %s used as an immediate value"),
24148 S_GET_NAME (fixP
->fx_addsy
));
24152 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24154 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
24157 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24158 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
24159 Thumb2 modified immediate encoding (T2). */
24160 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
24161 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
24163 newimm
= encode_thumb32_immediate (value
);
24164 if (newimm
== (unsigned int) FAIL
)
24165 newimm
= thumb32_negate_data_op (&newval
, value
);
24167 if (newimm
== (unsigned int) FAIL
)
24169 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
24171 /* Turn add/sum into addw/subw. */
24172 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
24173 newval
= (newval
& 0xfeffffff) | 0x02000000;
24174 /* No flat 12-bit imm encoding for addsw/subsw. */
24175 if ((newval
& 0x00100000) == 0)
24177 /* 12 bit immediate for addw/subw. */
24181 newval
^= 0x00a00000;
24184 newimm
= (unsigned int) FAIL
;
24191 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
24192 UINT16 (T3 encoding), MOVW only accepts UINT16. When
24193 disassembling, MOV is preferred when there is no encoding
24195 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
24196 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
24197 but with the Rn field [19:16] set to 1111. */
24198 && (((newval
>> 16) & 0xf) == 0xf)
24199 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
24200 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
24201 && value
>= 0 && value
<= 0xffff)
24203 /* Toggle bit[25] to change encoding from T2 to T3. */
24205 /* Clear bits[19:16]. */
24206 newval
&= 0xfff0ffff;
24207 /* Encoding high 4bits imm. Code below will encode the
24208 remaining low 12bits. */
24209 newval
|= (value
& 0x0000f000) << 4;
24210 newimm
= value
& 0x00000fff;
24215 if (newimm
== (unsigned int)FAIL
)
24217 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24218 _("invalid constant (%lx) after fixup"),
24219 (unsigned long) value
);
24223 newval
|= (newimm
& 0x800) << 15;
24224 newval
|= (newimm
& 0x700) << 4;
24225 newval
|= (newimm
& 0x0ff);
24227 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
24228 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
24231 case BFD_RELOC_ARM_SMC
:
24232 if (((unsigned long) value
) > 0xffff)
24233 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24234 _("invalid smc expression"));
24235 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24236 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
24237 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24240 case BFD_RELOC_ARM_HVC
:
24241 if (((unsigned long) value
) > 0xffff)
24242 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24243 _("invalid hvc expression"));
24244 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24245 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
24246 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24249 case BFD_RELOC_ARM_SWI
:
24250 if (fixP
->tc_fix_data
!= 0)
24252 if (((unsigned long) value
) > 0xff)
24253 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24254 _("invalid swi expression"));
24255 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24257 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24261 if (((unsigned long) value
) > 0x00ffffff)
24262 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24263 _("invalid swi expression"));
24264 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24266 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24270 case BFD_RELOC_ARM_MULTI
:
24271 if (((unsigned long) value
) > 0xffff)
24272 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24273 _("invalid expression in load/store multiple"));
24274 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
24275 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24279 case BFD_RELOC_ARM_PCREL_CALL
:
24281 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24283 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24284 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24285 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24286 /* Flip the bl to blx. This is a simple flip
24287 bit here because we generate PCREL_CALL for
24288 unconditional bls. */
24290 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24291 newval
= newval
| 0x10000000;
24292 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24298 goto arm_branch_common
;
24300 case BFD_RELOC_ARM_PCREL_JUMP
:
24301 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24303 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24304 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24305 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24307 /* This would map to a bl<cond>, b<cond>,
24308 b<always> to a Thumb function. We
24309 need to force a relocation for this particular
24311 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24314 /* Fall through. */
24316 case BFD_RELOC_ARM_PLT32
:
24318 case BFD_RELOC_ARM_PCREL_BRANCH
:
24320 goto arm_branch_common
;
24322 case BFD_RELOC_ARM_PCREL_BLX
:
24325 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24327 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24328 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24329 && ARM_IS_FUNC (fixP
->fx_addsy
))
24331 /* Flip the blx to a bl and warn. */
24332 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
24333 newval
= 0xeb000000;
24334 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
24335 _("blx to '%s' an ARM ISA state function changed to bl"),
24337 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24343 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
24344 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
24348 /* We are going to store value (shifted right by two) in the
24349 instruction, in a 24 bit, signed field. Bits 26 through 32 either
24350 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
24353 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24354 _("misaligned branch destination"));
24355 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
24356 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
24357 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24359 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24361 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24362 newval
|= (value
>> 2) & 0x00ffffff;
24363 /* Set the H bit on BLX instructions. */
24367 newval
|= 0x01000000;
24369 newval
&= ~0x01000000;
24371 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24375 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
24376 /* CBZ can only branch forward. */
24378 /* Attempts to use CBZ to branch to the next instruction
24379 (which, strictly speaking, are prohibited) will be turned into
24382 FIXME: It may be better to remove the instruction completely and
24383 perform relaxation. */
24386 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24387 newval
= 0xbf00; /* NOP encoding T1 */
24388 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24393 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24395 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24397 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24398 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
24399 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24404 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
24405 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
24406 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24408 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24410 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24411 newval
|= (value
& 0x1ff) >> 1;
24412 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24416 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
24417 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
24418 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24420 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24422 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24423 newval
|= (value
& 0xfff) >> 1;
24424 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24428 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24430 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24431 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24432 && ARM_IS_FUNC (fixP
->fx_addsy
)
24433 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24435 /* Force a relocation for a branch 20 bits wide. */
24438 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
24439 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24440 _("conditional branch out of range"));
24442 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24445 addressT S
, J1
, J2
, lo
, hi
;
24447 S
= (value
& 0x00100000) >> 20;
24448 J2
= (value
& 0x00080000) >> 19;
24449 J1
= (value
& 0x00040000) >> 18;
24450 hi
= (value
& 0x0003f000) >> 12;
24451 lo
= (value
& 0x00000ffe) >> 1;
24453 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24454 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24455 newval
|= (S
<< 10) | hi
;
24456 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
24457 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24458 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
24462 case BFD_RELOC_THUMB_PCREL_BLX
:
24463 /* If there is a blx from a thumb state function to
24464 another thumb function flip this to a bl and warn
24468 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24469 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24470 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24472 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
24473 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
24474 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
24476 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24477 newval
= newval
| 0x1000;
24478 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
24479 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24484 goto thumb_bl_common
;
24486 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24487 /* A bl from Thumb state ISA to an internal ARM state function
24488 is converted to a blx. */
24490 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24491 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24492 && ARM_IS_FUNC (fixP
->fx_addsy
)
24493 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24495 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24496 newval
= newval
& ~0x1000;
24497 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
24498 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
24504 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
24505 /* For a BLX instruction, make sure that the relocation is rounded up
24506 to a word boundary. This follows the semantics of the instruction
24507 which specifies that bit 1 of the target address will come from bit
24508 1 of the base address. */
24509 value
= (value
+ 3) & ~ 3;
24512 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
24513 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
24514 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24517 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
24519 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
24520 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24521 else if ((value
& ~0x1ffffff)
24522 && ((value
& ~0x1ffffff) != ~0x1ffffff))
24523 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24524 _("Thumb2 branch out of range"));
24527 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24528 encode_thumb2_b_bl_offset (buf
, value
);
24532 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24533 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
24534 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24536 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24537 encode_thumb2_b_bl_offset (buf
, value
);
24542 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24547 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24548 md_number_to_chars (buf
, value
, 2);
24552 case BFD_RELOC_ARM_TLS_CALL
:
24553 case BFD_RELOC_ARM_THM_TLS_CALL
:
24554 case BFD_RELOC_ARM_TLS_DESCSEQ
:
24555 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
24556 case BFD_RELOC_ARM_TLS_GOTDESC
:
24557 case BFD_RELOC_ARM_TLS_GD32
:
24558 case BFD_RELOC_ARM_TLS_LE32
:
24559 case BFD_RELOC_ARM_TLS_IE32
:
24560 case BFD_RELOC_ARM_TLS_LDM32
:
24561 case BFD_RELOC_ARM_TLS_LDO32
:
24562 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
24565 /* Same handling as above, but with the arm_fdpic guard. */
24566 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
24567 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
24568 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
24571 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
24575 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24576 _("Relocation supported only in FDPIC mode"));
24580 case BFD_RELOC_ARM_GOT32
:
24581 case BFD_RELOC_ARM_GOTOFF
:
24584 case BFD_RELOC_ARM_GOT_PREL
:
24585 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24586 md_number_to_chars (buf
, value
, 4);
24589 case BFD_RELOC_ARM_TARGET2
:
24590 /* TARGET2 is not partial-inplace, so we need to write the
24591 addend here for REL targets, because it won't be written out
24592 during reloc processing later. */
24593 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24594 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
24597 /* Relocations for FDPIC. */
24598 case BFD_RELOC_ARM_GOTFUNCDESC
:
24599 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
24600 case BFD_RELOC_ARM_FUNCDESC
:
24603 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24604 md_number_to_chars (buf
, 0, 4);
24608 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24609 _("Relocation supported only in FDPIC mode"));
24614 case BFD_RELOC_RVA
:
24616 case BFD_RELOC_ARM_TARGET1
:
24617 case BFD_RELOC_ARM_ROSEGREL32
:
24618 case BFD_RELOC_ARM_SBREL32
:
24619 case BFD_RELOC_32_PCREL
:
24621 case BFD_RELOC_32_SECREL
:
24623 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24625 /* For WinCE we only do this for pcrel fixups. */
24626 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
24628 md_number_to_chars (buf
, value
, 4);
24632 case BFD_RELOC_ARM_PREL31
:
24633 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24635 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
24636 if ((value
^ (value
>> 1)) & 0x40000000)
24638 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24639 _("rel31 relocation overflow"));
24641 newval
|= value
& 0x7fffffff;
24642 md_number_to_chars (buf
, newval
, 4);
24647 case BFD_RELOC_ARM_CP_OFF_IMM
:
24648 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
24649 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
:
24650 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
24651 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24653 newval
= get_thumb32_insn (buf
);
24654 if ((newval
& 0x0f200f00) == 0x0d000900)
24656 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
24657 has permitted values that are multiples of 2, in the range 0
24659 if (value
< -510 || value
> 510 || (value
& 1))
24660 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24661 _("co-processor offset out of range"));
24663 else if ((newval
& 0xfe001f80) == 0xec000f80)
24665 if (value
< -511 || value
> 512 || (value
& 3))
24666 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24667 _("co-processor offset out of range"));
24669 else if (value
< -1023 || value
> 1023 || (value
& 3))
24670 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24671 _("co-processor offset out of range"));
24676 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24677 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
24678 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24680 newval
= get_thumb32_insn (buf
);
24683 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
24684 newval
&= 0xffffff80;
24686 newval
&= 0xffffff00;
24690 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
24691 newval
&= 0xff7fff80;
24693 newval
&= 0xff7fff00;
24694 if ((newval
& 0x0f200f00) == 0x0d000900)
24696 /* This is a fp16 vstr/vldr.
24698 It requires the immediate offset in the instruction is shifted
24699 left by 1 to be a half-word offset.
24701 Here, left shift by 1 first, and later right shift by 2
24702 should get the right offset. */
24705 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
24707 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24708 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
24709 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24711 put_thumb32_insn (buf
, newval
);
24714 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
24715 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
24716 if (value
< -255 || value
> 255)
24717 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24718 _("co-processor offset out of range"));
24720 goto cp_off_common
;
24722 case BFD_RELOC_ARM_THUMB_OFFSET
:
24723 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24724 /* Exactly what ranges, and where the offset is inserted depends
24725 on the type of instruction, we can establish this from the
24727 switch (newval
>> 12)
24729 case 4: /* PC load. */
24730 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24731 forced to zero for these loads; md_pcrel_from has already
24732 compensated for this. */
24734 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24735 _("invalid offset, target not word aligned (0x%08lX)"),
24736 (((unsigned long) fixP
->fx_frag
->fr_address
24737 + (unsigned long) fixP
->fx_where
) & ~3)
24738 + (unsigned long) value
);
24740 if (value
& ~0x3fc)
24741 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24742 _("invalid offset, value too big (0x%08lX)"),
24745 newval
|= value
>> 2;
24748 case 9: /* SP load/store. */
24749 if (value
& ~0x3fc)
24750 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24751 _("invalid offset, value too big (0x%08lX)"),
24753 newval
|= value
>> 2;
24756 case 6: /* Word load/store. */
24758 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24759 _("invalid offset, value too big (0x%08lX)"),
24761 newval
|= value
<< 4; /* 6 - 2. */
24764 case 7: /* Byte load/store. */
24766 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24767 _("invalid offset, value too big (0x%08lX)"),
24769 newval
|= value
<< 6;
24772 case 8: /* Halfword load/store. */
24774 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24775 _("invalid offset, value too big (0x%08lX)"),
24777 newval
|= value
<< 5; /* 6 - 1. */
24781 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24782 "Unable to process relocation for thumb opcode: %lx",
24783 (unsigned long) newval
);
24786 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24789 case BFD_RELOC_ARM_THUMB_ADD
:
24790 /* This is a complicated relocation, since we use it for all of
24791 the following immediate relocations:
24795 9bit ADD/SUB SP word-aligned
24796 10bit ADD PC/SP word-aligned
24798 The type of instruction being processed is encoded in the
24805 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24807 int rd
= (newval
>> 4) & 0xf;
24808 int rs
= newval
& 0xf;
24809 int subtract
= !!(newval
& 0x8000);
24811 /* Check for HI regs, only very restricted cases allowed:
24812 Adjusting SP, and using PC or SP to get an address. */
24813 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
24814 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
24815 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24816 _("invalid Hi register with immediate"));
24818 /* If value is negative, choose the opposite instruction. */
24822 subtract
= !subtract
;
24824 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24825 _("immediate value out of range"));
24830 if (value
& ~0x1fc)
24831 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24832 _("invalid immediate for stack address calculation"));
24833 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
24834 newval
|= value
>> 2;
24836 else if (rs
== REG_PC
|| rs
== REG_SP
)
24838 /* PR gas/18541. If the addition is for a defined symbol
24839 within range of an ADR instruction then accept it. */
24842 && fixP
->fx_addsy
!= NULL
)
24846 if (! S_IS_DEFINED (fixP
->fx_addsy
)
24847 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
24848 || S_IS_WEAK (fixP
->fx_addsy
))
24850 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24851 _("address calculation needs a strongly defined nearby symbol"));
24855 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24857 /* Round up to the next 4-byte boundary. */
24862 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
24866 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24867 _("symbol too far away"));
24877 if (subtract
|| value
& ~0x3fc)
24878 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24879 _("invalid immediate for address calculation (value = 0x%08lX)"),
24880 (unsigned long) (subtract
? - value
: value
));
24881 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
24883 newval
|= value
>> 2;
24888 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24889 _("immediate value out of range"));
24890 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
24891 newval
|= (rd
<< 8) | value
;
24896 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24897 _("immediate value out of range"));
24898 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
24899 newval
|= rd
| (rs
<< 3) | (value
<< 6);
24902 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24905 case BFD_RELOC_ARM_THUMB_IMM
:
24906 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24907 if (value
< 0 || value
> 255)
24908 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24909 _("invalid immediate: %ld is out of range"),
24912 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24915 case BFD_RELOC_ARM_THUMB_SHIFT
:
24916 /* 5bit shift value (0..32). LSL cannot take 32. */
24917 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
24918 temp
= newval
& 0xf800;
24919 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
24920 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24921 _("invalid shift value: %ld"), (long) value
);
24922 /* Shifts of zero must be encoded as LSL. */
24924 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
24925 /* Shifts of 32 are encoded as zero. */
24926 else if (value
== 32)
24928 newval
|= value
<< 6;
24929 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24932 case BFD_RELOC_VTABLE_INHERIT
:
24933 case BFD_RELOC_VTABLE_ENTRY
:
24937 case BFD_RELOC_ARM_MOVW
:
24938 case BFD_RELOC_ARM_MOVT
:
24939 case BFD_RELOC_ARM_THUMB_MOVW
:
24940 case BFD_RELOC_ARM_THUMB_MOVT
:
24941 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24943 /* REL format relocations are limited to a 16-bit addend. */
24944 if (!fixP
->fx_done
)
24946 if (value
< -0x8000 || value
> 0x7fff)
24947 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24948 _("offset out of range"));
24950 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24951 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24956 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24957 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24959 newval
= get_thumb32_insn (buf
);
24960 newval
&= 0xfbf08f00;
24961 newval
|= (value
& 0xf000) << 4;
24962 newval
|= (value
& 0x0800) << 15;
24963 newval
|= (value
& 0x0700) << 4;
24964 newval
|= (value
& 0x00ff);
24965 put_thumb32_insn (buf
, newval
);
24969 newval
= md_chars_to_number (buf
, 4);
24970 newval
&= 0xfff0f000;
24971 newval
|= value
& 0x0fff;
24972 newval
|= (value
& 0xf000) << 4;
24973 md_number_to_chars (buf
, newval
, 4);
24978 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24979 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24980 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24981 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24982 gas_assert (!fixP
->fx_done
);
24985 bfd_boolean is_mov
;
24986 bfd_vma encoded_addend
= value
;
24988 /* Check that addend can be encoded in instruction. */
24989 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
24990 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24991 _("the offset 0x%08lX is not representable"),
24992 (unsigned long) encoded_addend
);
24994 /* Extract the instruction. */
24995 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
24996 is_mov
= (insn
& 0xf800) == 0x2000;
25001 if (!seg
->use_rela_p
)
25002 insn
|= encoded_addend
;
25008 /* Extract the instruction. */
25009 /* Encoding is the following
25014 /* The following conditions must be true :
25019 rd
= (insn
>> 4) & 0xf;
25021 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
25022 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25023 _("Unable to process relocation for thumb opcode: %lx"),
25024 (unsigned long) insn
);
25026 /* Encode as ADD immediate8 thumb 1 code. */
25027 insn
= 0x3000 | (rd
<< 8);
25029 /* Place the encoded addend into the first 8 bits of the
25031 if (!seg
->use_rela_p
)
25032 insn
|= encoded_addend
;
25035 /* Update the instruction. */
25036 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
25040 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
25041 case BFD_RELOC_ARM_ALU_PC_G0
:
25042 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
25043 case BFD_RELOC_ARM_ALU_PC_G1
:
25044 case BFD_RELOC_ARM_ALU_PC_G2
:
25045 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
25046 case BFD_RELOC_ARM_ALU_SB_G0
:
25047 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
25048 case BFD_RELOC_ARM_ALU_SB_G1
:
25049 case BFD_RELOC_ARM_ALU_SB_G2
:
25050 gas_assert (!fixP
->fx_done
);
25051 if (!seg
->use_rela_p
)
25054 bfd_vma encoded_addend
;
25055 bfd_vma addend_abs
= llabs (value
);
25057 /* Check that the absolute value of the addend can be
25058 expressed as an 8-bit constant plus a rotation. */
25059 encoded_addend
= encode_arm_immediate (addend_abs
);
25060 if (encoded_addend
== (unsigned int) FAIL
)
25061 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25062 _("the offset 0x%08lX is not representable"),
25063 (unsigned long) addend_abs
);
25065 /* Extract the instruction. */
25066 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25068 /* If the addend is positive, use an ADD instruction.
25069 Otherwise use a SUB. Take care not to destroy the S bit. */
25070 insn
&= 0xff1fffff;
25076 /* Place the encoded addend into the first 12 bits of the
25078 insn
&= 0xfffff000;
25079 insn
|= encoded_addend
;
25081 /* Update the instruction. */
25082 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25086 case BFD_RELOC_ARM_LDR_PC_G0
:
25087 case BFD_RELOC_ARM_LDR_PC_G1
:
25088 case BFD_RELOC_ARM_LDR_PC_G2
:
25089 case BFD_RELOC_ARM_LDR_SB_G0
:
25090 case BFD_RELOC_ARM_LDR_SB_G1
:
25091 case BFD_RELOC_ARM_LDR_SB_G2
:
25092 gas_assert (!fixP
->fx_done
);
25093 if (!seg
->use_rela_p
)
25096 bfd_vma addend_abs
= llabs (value
);
25098 /* Check that the absolute value of the addend can be
25099 encoded in 12 bits. */
25100 if (addend_abs
>= 0x1000)
25101 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25102 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
25103 (unsigned long) addend_abs
);
25105 /* Extract the instruction. */
25106 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25108 /* If the addend is negative, clear bit 23 of the instruction.
25109 Otherwise set it. */
25111 insn
&= ~(1 << 23);
25115 /* Place the absolute value of the addend into the first 12 bits
25116 of the instruction. */
25117 insn
&= 0xfffff000;
25118 insn
|= addend_abs
;
25120 /* Update the instruction. */
25121 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25125 case BFD_RELOC_ARM_LDRS_PC_G0
:
25126 case BFD_RELOC_ARM_LDRS_PC_G1
:
25127 case BFD_RELOC_ARM_LDRS_PC_G2
:
25128 case BFD_RELOC_ARM_LDRS_SB_G0
:
25129 case BFD_RELOC_ARM_LDRS_SB_G1
:
25130 case BFD_RELOC_ARM_LDRS_SB_G2
:
25131 gas_assert (!fixP
->fx_done
);
25132 if (!seg
->use_rela_p
)
25135 bfd_vma addend_abs
= llabs (value
);
25137 /* Check that the absolute value of the addend can be
25138 encoded in 8 bits. */
25139 if (addend_abs
>= 0x100)
25140 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25141 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
25142 (unsigned long) addend_abs
);
25144 /* Extract the instruction. */
25145 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25147 /* If the addend is negative, clear bit 23 of the instruction.
25148 Otherwise set it. */
25150 insn
&= ~(1 << 23);
25154 /* Place the first four bits of the absolute value of the addend
25155 into the first 4 bits of the instruction, and the remaining
25156 four into bits 8 .. 11. */
25157 insn
&= 0xfffff0f0;
25158 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
25160 /* Update the instruction. */
25161 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25165 case BFD_RELOC_ARM_LDC_PC_G0
:
25166 case BFD_RELOC_ARM_LDC_PC_G1
:
25167 case BFD_RELOC_ARM_LDC_PC_G2
:
25168 case BFD_RELOC_ARM_LDC_SB_G0
:
25169 case BFD_RELOC_ARM_LDC_SB_G1
:
25170 case BFD_RELOC_ARM_LDC_SB_G2
:
25171 gas_assert (!fixP
->fx_done
);
25172 if (!seg
->use_rela_p
)
25175 bfd_vma addend_abs
= llabs (value
);
25177 /* Check that the absolute value of the addend is a multiple of
25178 four and, when divided by four, fits in 8 bits. */
25179 if (addend_abs
& 0x3)
25180 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25181 _("bad offset 0x%08lX (must be word-aligned)"),
25182 (unsigned long) addend_abs
);
25184 if ((addend_abs
>> 2) > 0xff)
25185 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25186 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
25187 (unsigned long) addend_abs
);
25189 /* Extract the instruction. */
25190 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25192 /* If the addend is negative, clear bit 23 of the instruction.
25193 Otherwise set it. */
25195 insn
&= ~(1 << 23);
25199 /* Place the addend (divided by four) into the first eight
25200 bits of the instruction. */
25201 insn
&= 0xfffffff0;
25202 insn
|= addend_abs
>> 2;
25204 /* Update the instruction. */
25205 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25209 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
25211 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25212 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25213 && ARM_IS_FUNC (fixP
->fx_addsy
)
25214 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25216 /* Force a relocation for a branch 5 bits wide. */
25219 if (v8_1_branch_value_check (value
, 5, FALSE
) == FAIL
)
25220 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25223 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25225 addressT boff
= value
>> 1;
25227 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25228 newval
|= (boff
<< 7);
25229 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25233 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
25235 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25236 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25237 && ARM_IS_FUNC (fixP
->fx_addsy
)
25238 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25242 if ((value
& ~0x7f) && ((value
& ~0x3f) != ~0x3f))
25243 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25244 _("branch out of range"));
25246 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25248 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25250 addressT boff
= ((newval
& 0x0780) >> 7) << 1;
25251 addressT diff
= value
- boff
;
25255 newval
|= 1 << 1; /* T bit. */
25257 else if (diff
!= 2)
25259 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25260 _("out of range label-relative fixup value"));
25262 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25266 case BFD_RELOC_ARM_THUMB_BF17
:
25268 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25269 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25270 && ARM_IS_FUNC (fixP
->fx_addsy
)
25271 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25273 /* Force a relocation for a branch 17 bits wide. */
25277 if (v8_1_branch_value_check (value
, 17, TRUE
) == FAIL
)
25278 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25281 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25284 addressT immA
, immB
, immC
;
25286 immA
= (value
& 0x0001f000) >> 12;
25287 immB
= (value
& 0x00000ffc) >> 2;
25288 immC
= (value
& 0x00000002) >> 1;
25290 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25291 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25293 newval2
|= (immC
<< 11) | (immB
<< 1);
25294 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25295 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25299 case BFD_RELOC_ARM_THUMB_BF19
:
25301 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25302 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25303 && ARM_IS_FUNC (fixP
->fx_addsy
)
25304 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25306 /* Force a relocation for a branch 19 bits wide. */
25310 if (v8_1_branch_value_check (value
, 19, TRUE
) == FAIL
)
25311 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25314 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25317 addressT immA
, immB
, immC
;
25319 immA
= (value
& 0x0007f000) >> 12;
25320 immB
= (value
& 0x00000ffc) >> 2;
25321 immC
= (value
& 0x00000002) >> 1;
25323 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25324 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25326 newval2
|= (immC
<< 11) | (immB
<< 1);
25327 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25328 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25332 case BFD_RELOC_ARM_THUMB_BF13
:
25334 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25335 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25336 && ARM_IS_FUNC (fixP
->fx_addsy
)
25337 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25339 /* Force a relocation for a branch 13 bits wide. */
25343 if (v8_1_branch_value_check (value
, 13, TRUE
) == FAIL
)
25344 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25347 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25350 addressT immA
, immB
, immC
;
25352 immA
= (value
& 0x00001000) >> 12;
25353 immB
= (value
& 0x00000ffc) >> 2;
25354 immC
= (value
& 0x00000002) >> 1;
25356 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25357 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25359 newval2
|= (immC
<< 11) | (immB
<< 1);
25360 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25361 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25365 case BFD_RELOC_ARM_THUMB_LOOP12
:
25367 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25368 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25369 && ARM_IS_FUNC (fixP
->fx_addsy
)
25370 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25372 /* Force a relocation for a branch 12 bits wide. */
25376 bfd_vma insn
= get_thumb32_insn (buf
);
25377 /* le lr, <label> or le <label> */
25378 if (((insn
& 0xffffffff) == 0xf00fc001)
25379 || ((insn
& 0xffffffff) == 0xf02fc001))
25382 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
25383 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25385 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25387 addressT imml
, immh
;
25389 immh
= (value
& 0x00000ffc) >> 2;
25390 imml
= (value
& 0x00000002) >> 1;
25392 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25393 newval
|= (imml
<< 11) | (immh
<< 1);
25394 md_number_to_chars (buf
+ THUMB_SIZE
, newval
, THUMB_SIZE
);
25398 case BFD_RELOC_ARM_V4BX
:
25399 /* This will need to go in the object file. */
25403 case BFD_RELOC_UNUSED
:
25405 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25406 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
25410 /* Translate internal representation of relocation info to BFD target
25414 tc_gen_reloc (asection
*section
, fixS
*fixp
)
25417 bfd_reloc_code_real_type code
;
25419 reloc
= XNEW (arelent
);
25421 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
25422 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
25423 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
25425 if (fixp
->fx_pcrel
)
25427 if (section
->use_rela_p
)
25428 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
25430 fixp
->fx_offset
= reloc
->address
;
25432 reloc
->addend
= fixp
->fx_offset
;
25434 switch (fixp
->fx_r_type
)
25437 if (fixp
->fx_pcrel
)
25439 code
= BFD_RELOC_8_PCREL
;
25442 /* Fall through. */
25445 if (fixp
->fx_pcrel
)
25447 code
= BFD_RELOC_16_PCREL
;
25450 /* Fall through. */
25453 if (fixp
->fx_pcrel
)
25455 code
= BFD_RELOC_32_PCREL
;
25458 /* Fall through. */
25460 case BFD_RELOC_ARM_MOVW
:
25461 if (fixp
->fx_pcrel
)
25463 code
= BFD_RELOC_ARM_MOVW_PCREL
;
25466 /* Fall through. */
25468 case BFD_RELOC_ARM_MOVT
:
25469 if (fixp
->fx_pcrel
)
25471 code
= BFD_RELOC_ARM_MOVT_PCREL
;
25474 /* Fall through. */
25476 case BFD_RELOC_ARM_THUMB_MOVW
:
25477 if (fixp
->fx_pcrel
)
25479 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
25482 /* Fall through. */
25484 case BFD_RELOC_ARM_THUMB_MOVT
:
25485 if (fixp
->fx_pcrel
)
25487 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
25490 /* Fall through. */
25492 case BFD_RELOC_NONE
:
25493 case BFD_RELOC_ARM_PCREL_BRANCH
:
25494 case BFD_RELOC_ARM_PCREL_BLX
:
25495 case BFD_RELOC_RVA
:
25496 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
25497 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
25498 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
25499 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25500 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25501 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25502 case BFD_RELOC_VTABLE_ENTRY
:
25503 case BFD_RELOC_VTABLE_INHERIT
:
25505 case BFD_RELOC_32_SECREL
:
25507 code
= fixp
->fx_r_type
;
25510 case BFD_RELOC_THUMB_PCREL_BLX
:
25512 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
25513 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25516 code
= BFD_RELOC_THUMB_PCREL_BLX
;
25519 case BFD_RELOC_ARM_LITERAL
:
25520 case BFD_RELOC_ARM_HWLITERAL
:
25521 /* If this is called then the a literal has
25522 been referenced across a section boundary. */
25523 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25524 _("literal referenced across section boundary"));
25528 case BFD_RELOC_ARM_TLS_CALL
:
25529 case BFD_RELOC_ARM_THM_TLS_CALL
:
25530 case BFD_RELOC_ARM_TLS_DESCSEQ
:
25531 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
25532 case BFD_RELOC_ARM_GOT32
:
25533 case BFD_RELOC_ARM_GOTOFF
:
25534 case BFD_RELOC_ARM_GOT_PREL
:
25535 case BFD_RELOC_ARM_PLT32
:
25536 case BFD_RELOC_ARM_TARGET1
:
25537 case BFD_RELOC_ARM_ROSEGREL32
:
25538 case BFD_RELOC_ARM_SBREL32
:
25539 case BFD_RELOC_ARM_PREL31
:
25540 case BFD_RELOC_ARM_TARGET2
:
25541 case BFD_RELOC_ARM_TLS_LDO32
:
25542 case BFD_RELOC_ARM_PCREL_CALL
:
25543 case BFD_RELOC_ARM_PCREL_JUMP
:
25544 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
25545 case BFD_RELOC_ARM_ALU_PC_G0
:
25546 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
25547 case BFD_RELOC_ARM_ALU_PC_G1
:
25548 case BFD_RELOC_ARM_ALU_PC_G2
:
25549 case BFD_RELOC_ARM_LDR_PC_G0
:
25550 case BFD_RELOC_ARM_LDR_PC_G1
:
25551 case BFD_RELOC_ARM_LDR_PC_G2
:
25552 case BFD_RELOC_ARM_LDRS_PC_G0
:
25553 case BFD_RELOC_ARM_LDRS_PC_G1
:
25554 case BFD_RELOC_ARM_LDRS_PC_G2
:
25555 case BFD_RELOC_ARM_LDC_PC_G0
:
25556 case BFD_RELOC_ARM_LDC_PC_G1
:
25557 case BFD_RELOC_ARM_LDC_PC_G2
:
25558 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
25559 case BFD_RELOC_ARM_ALU_SB_G0
:
25560 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
25561 case BFD_RELOC_ARM_ALU_SB_G1
:
25562 case BFD_RELOC_ARM_ALU_SB_G2
:
25563 case BFD_RELOC_ARM_LDR_SB_G0
:
25564 case BFD_RELOC_ARM_LDR_SB_G1
:
25565 case BFD_RELOC_ARM_LDR_SB_G2
:
25566 case BFD_RELOC_ARM_LDRS_SB_G0
:
25567 case BFD_RELOC_ARM_LDRS_SB_G1
:
25568 case BFD_RELOC_ARM_LDRS_SB_G2
:
25569 case BFD_RELOC_ARM_LDC_SB_G0
:
25570 case BFD_RELOC_ARM_LDC_SB_G1
:
25571 case BFD_RELOC_ARM_LDC_SB_G2
:
25572 case BFD_RELOC_ARM_V4BX
:
25573 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
25574 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
25575 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
25576 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
25577 case BFD_RELOC_ARM_GOTFUNCDESC
:
25578 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
25579 case BFD_RELOC_ARM_FUNCDESC
:
25580 case BFD_RELOC_ARM_THUMB_BF17
:
25581 case BFD_RELOC_ARM_THUMB_BF19
:
25582 case BFD_RELOC_ARM_THUMB_BF13
:
25583 code
= fixp
->fx_r_type
;
25586 case BFD_RELOC_ARM_TLS_GOTDESC
:
25587 case BFD_RELOC_ARM_TLS_GD32
:
25588 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
25589 case BFD_RELOC_ARM_TLS_LE32
:
25590 case BFD_RELOC_ARM_TLS_IE32
:
25591 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
25592 case BFD_RELOC_ARM_TLS_LDM32
:
25593 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
25594 /* BFD will include the symbol's address in the addend.
25595 But we don't want that, so subtract it out again here. */
25596 if (!S_IS_COMMON (fixp
->fx_addsy
))
25597 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
25598 code
= fixp
->fx_r_type
;
25602 case BFD_RELOC_ARM_IMMEDIATE
:
25603 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25604 _("internal relocation (type: IMMEDIATE) not fixed up"));
25607 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
25608 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25609 _("ADRL used for a symbol not defined in the same file"));
25612 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
25613 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
25614 case BFD_RELOC_ARM_THUMB_LOOP12
:
25615 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25616 _("%s used for a symbol not defined in the same file"),
25617 bfd_get_reloc_code_name (fixp
->fx_r_type
));
25620 case BFD_RELOC_ARM_OFFSET_IMM
:
25621 if (section
->use_rela_p
)
25623 code
= fixp
->fx_r_type
;
25627 if (fixp
->fx_addsy
!= NULL
25628 && !S_IS_DEFINED (fixp
->fx_addsy
)
25629 && S_IS_LOCAL (fixp
->fx_addsy
))
25631 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25632 _("undefined local label `%s'"),
25633 S_GET_NAME (fixp
->fx_addsy
));
25637 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25638 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
25645 switch (fixp
->fx_r_type
)
25647 case BFD_RELOC_NONE
: type
= "NONE"; break;
25648 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
25649 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
25650 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
25651 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
25652 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
25653 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
25654 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
25655 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
25656 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
25657 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
25658 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
25659 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
25660 default: type
= _("<unknown>"); break;
25662 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25663 _("cannot represent %s relocation in this object file format"),
25670 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
25672 && fixp
->fx_addsy
== GOT_symbol
)
25674 code
= BFD_RELOC_ARM_GOTPC
;
25675 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
25679 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
25681 if (reloc
->howto
== NULL
)
25683 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25684 _("cannot represent %s relocation in this object file format"),
25685 bfd_get_reloc_code_name (code
));
25689 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
25690 vtable entry to be used in the relocation's section offset. */
25691 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
25692 reloc
->address
= fixp
->fx_offset
;
25697 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
25700 cons_fix_new_arm (fragS
* frag
,
25704 bfd_reloc_code_real_type reloc
)
25709 FIXME: @@ Should look at CPU word size. */
25713 reloc
= BFD_RELOC_8
;
25716 reloc
= BFD_RELOC_16
;
25720 reloc
= BFD_RELOC_32
;
25723 reloc
= BFD_RELOC_64
;
25728 if (exp
->X_op
== O_secrel
)
25730 exp
->X_op
= O_symbol
;
25731 reloc
= BFD_RELOC_32_SECREL
;
25735 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
25738 #if defined (OBJ_COFF)
25740 arm_validate_fix (fixS
* fixP
)
25742 /* If the destination of the branch is a defined symbol which does not have
25743 the THUMB_FUNC attribute, then we must be calling a function which has
25744 the (interfacearm) attribute. We look for the Thumb entry point to that
25745 function and change the branch to refer to that function instead. */
25746 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
25747 && fixP
->fx_addsy
!= NULL
25748 && S_IS_DEFINED (fixP
->fx_addsy
)
25749 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
25751 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
25758 arm_force_relocation (struct fix
* fixp
)
25760 #if defined (OBJ_COFF) && defined (TE_PE)
25761 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
25765 /* In case we have a call or a branch to a function in ARM ISA mode from
25766 a thumb function or vice-versa force the relocation. These relocations
25767 are cleared off for some cores that might have blx and simple transformations
25771 switch (fixp
->fx_r_type
)
25773 case BFD_RELOC_ARM_PCREL_JUMP
:
25774 case BFD_RELOC_ARM_PCREL_CALL
:
25775 case BFD_RELOC_THUMB_PCREL_BLX
:
25776 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
25780 case BFD_RELOC_ARM_PCREL_BLX
:
25781 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25782 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25783 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25784 if (ARM_IS_FUNC (fixp
->fx_addsy
))
25793 /* Resolve these relocations even if the symbol is extern or weak.
25794 Technically this is probably wrong due to symbol preemption.
25795 In practice these relocations do not have enough range to be useful
25796 at dynamic link time, and some code (e.g. in the Linux kernel)
25797 expects these references to be resolved. */
25798 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
25799 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
25800 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
25801 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
25802 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25803 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
25804 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
25805 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
25806 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
25807 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
25808 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
25809 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
25810 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
25811 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
25814 /* Always leave these relocations for the linker. */
25815 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
25816 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
25817 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
25820 /* Always generate relocations against function symbols. */
25821 if (fixp
->fx_r_type
== BFD_RELOC_32
25823 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
25826 return generic_force_reloc (fixp
);
25829 #if defined (OBJ_ELF) || defined (OBJ_COFF)
25830 /* Relocations against function names must be left unadjusted,
25831 so that the linker can use this information to generate interworking
25832 stubs. The MIPS version of this function
25833 also prevents relocations that are mips-16 specific, but I do not
25834 know why it does this.
25837 There is one other problem that ought to be addressed here, but
25838 which currently is not: Taking the address of a label (rather
25839 than a function) and then later jumping to that address. Such
25840 addresses also ought to have their bottom bit set (assuming that
25841 they reside in Thumb code), but at the moment they will not. */
25844 arm_fix_adjustable (fixS
* fixP
)
25846 if (fixP
->fx_addsy
== NULL
)
25849 /* Preserve relocations against symbols with function type. */
25850 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
25853 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
25854 && fixP
->fx_subsy
== NULL
)
25857 /* We need the symbol name for the VTABLE entries. */
25858 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
25859 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
25862 /* Don't allow symbols to be discarded on GOT related relocs. */
25863 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
25864 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
25865 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
25866 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
25867 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32_FDPIC
25868 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
25869 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
25870 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32_FDPIC
25871 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
25872 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32_FDPIC
25873 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
25874 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
25875 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
25876 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
25877 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
25878 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
25879 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
25882 /* Similarly for group relocations. */
25883 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
25884 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
25885 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
25888 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
25889 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
25890 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
25891 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
25892 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
25893 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
25894 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
25895 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
25896 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
25899 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
25900 offsets, so keep these symbols. */
25901 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
25902 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
25907 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25911 elf32_arm_target_format (void)
25914 return (target_big_endian
25915 ? "elf32-bigarm-symbian"
25916 : "elf32-littlearm-symbian");
25917 #elif defined (TE_VXWORKS)
25918 return (target_big_endian
25919 ? "elf32-bigarm-vxworks"
25920 : "elf32-littlearm-vxworks");
25921 #elif defined (TE_NACL)
25922 return (target_big_endian
25923 ? "elf32-bigarm-nacl"
25924 : "elf32-littlearm-nacl");
25928 if (target_big_endian
)
25929 return "elf32-bigarm-fdpic";
25931 return "elf32-littlearm-fdpic";
25935 if (target_big_endian
)
25936 return "elf32-bigarm";
25938 return "elf32-littlearm";
25944 armelf_frob_symbol (symbolS
* symp
,
25947 elf_frob_symbol (symp
, puntp
);
25951 /* MD interface: Finalization. */
25956 literal_pool
* pool
;
25958 /* Ensure that all the IT blocks are properly closed. */
25959 check_it_blocks_finished ();
25961 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
25963 /* Put it at the end of the relevant section. */
25964 subseg_set (pool
->section
, pool
->sub_section
);
25966 arm_elf_change_section ();
25973 /* Remove any excess mapping symbols generated for alignment frags in
25974 SEC. We may have created a mapping symbol before a zero byte
25975 alignment; remove it if there's a mapping symbol after the
25978 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
25979 void *dummy ATTRIBUTE_UNUSED
)
25981 segment_info_type
*seginfo
= seg_info (sec
);
25984 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
25987 for (fragp
= seginfo
->frchainP
->frch_root
;
25989 fragp
= fragp
->fr_next
)
25991 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
25992 fragS
*next
= fragp
->fr_next
;
25994 /* Variable-sized frags have been converted to fixed size by
25995 this point. But if this was variable-sized to start with,
25996 there will be a fixed-size frag after it. So don't handle
25998 if (sym
== NULL
|| next
== NULL
)
26001 if (S_GET_VALUE (sym
) < next
->fr_address
)
26002 /* Not at the end of this frag. */
26004 know (S_GET_VALUE (sym
) == next
->fr_address
);
26008 if (next
->tc_frag_data
.first_map
!= NULL
)
26010 /* Next frag starts with a mapping symbol. Discard this
26012 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
26016 if (next
->fr_next
== NULL
)
26018 /* This mapping symbol is at the end of the section. Discard
26020 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
26021 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
26025 /* As long as we have empty frags without any mapping symbols,
26027 /* If the next frag is non-empty and does not start with a
26028 mapping symbol, then this mapping symbol is required. */
26029 if (next
->fr_address
!= next
->fr_next
->fr_address
)
26032 next
= next
->fr_next
;
26034 while (next
!= NULL
);
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  (Extraction had dropped the #ifdef OBJ_COFF/OBJ_ELF
   skeleton, case labels, breaks and local declarations; restored.)  */
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
26121 /* MD interface: Initialization. */
26124 set_constant_flonums (void)
26128 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
26129 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
26133 /* Auto-select Thumb mode if it's the only available instruction set for the
26134 given architecture. */
26137 autoselect_thumb_from_cpu_variant (void)
26139 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
26140 opcode_select (16);
26149 if ( (arm_ops_hsh
= hash_new ()) == NULL
26150 || (arm_cond_hsh
= hash_new ()) == NULL
26151 || (arm_shift_hsh
= hash_new ()) == NULL
26152 || (arm_psr_hsh
= hash_new ()) == NULL
26153 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
26154 || (arm_reg_hsh
= hash_new ()) == NULL
26155 || (arm_reloc_hsh
= hash_new ()) == NULL
26156 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
26157 as_fatal (_("virtual memory exhausted"));
26159 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
26160 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
26161 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
26162 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
26163 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
26164 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
26165 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
26166 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
26167 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
26168 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
26169 (void *) (v7m_psrs
+ i
));
26170 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
26171 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
26173 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
26175 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
26176 (void *) (barrier_opt_names
+ i
));
26178 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
26180 struct reloc_entry
* entry
= reloc_names
+ i
;
26182 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
26183 /* This makes encode_branch() use the EABI versions of this relocation. */
26184 entry
->reloc
= BFD_RELOC_UNUSED
;
26186 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
26190 set_constant_flonums ();
26192 /* Set the cpu variant based on the command-line options. We prefer
26193 -mcpu= over -march= if both are set (as for GCC); and we prefer
26194 -mfpu= over any other way of setting the floating point unit.
26195 Use of legacy options with new options are faulted. */
26198 if (mcpu_cpu_opt
|| march_cpu_opt
)
26199 as_bad (_("use of old and new-style options to set CPU type"));
26201 selected_arch
= *legacy_cpu
;
26203 else if (mcpu_cpu_opt
)
26205 selected_arch
= *mcpu_cpu_opt
;
26206 selected_ext
= *mcpu_ext_opt
;
26208 else if (march_cpu_opt
)
26210 selected_arch
= *march_cpu_opt
;
26211 selected_ext
= *march_ext_opt
;
26213 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
26218 as_bad (_("use of old and new-style options to set FPU type"));
26220 selected_fpu
= *legacy_fpu
;
26223 selected_fpu
= *mfpu_opt
;
26226 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
26227 || defined (TE_NetBSD) || defined (TE_VXWORKS))
26228 /* Some environments specify a default FPU. If they don't, infer it
26229 from the processor. */
26231 selected_fpu
= *mcpu_fpu_opt
;
26232 else if (march_fpu_opt
)
26233 selected_fpu
= *march_fpu_opt
;
26235 selected_fpu
= fpu_default
;
26239 if (ARM_FEATURE_ZERO (selected_fpu
))
26241 if (!no_cpu_selected ())
26242 selected_fpu
= fpu_default
;
26244 selected_fpu
= fpu_arch_fpa
;
26248 if (ARM_FEATURE_ZERO (selected_arch
))
26250 selected_arch
= cpu_default
;
26251 selected_cpu
= selected_arch
;
26253 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
26255 /* Autodection of feature mode: allow all features in cpu_variant but leave
26256 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
26257 after all instruction have been processed and we can decide what CPU
26258 should be selected. */
26259 if (ARM_FEATURE_ZERO (selected_arch
))
26260 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
26262 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
26265 autoselect_thumb_from_cpu_variant ();
26267 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
26269 #if defined OBJ_COFF || defined OBJ_ELF
26271 unsigned int flags
= 0;
26273 #if defined OBJ_ELF
26274 flags
= meabi_flags
;
26276 switch (meabi_flags
)
26278 case EF_ARM_EABI_UNKNOWN
:
26280 /* Set the flags in the private structure. */
26281 if (uses_apcs_26
) flags
|= F_APCS26
;
26282 if (support_interwork
) flags
|= F_INTERWORK
;
26283 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
26284 if (pic_code
) flags
|= F_PIC
;
26285 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
26286 flags
|= F_SOFT_FLOAT
;
26288 switch (mfloat_abi_opt
)
26290 case ARM_FLOAT_ABI_SOFT
:
26291 case ARM_FLOAT_ABI_SOFTFP
:
26292 flags
|= F_SOFT_FLOAT
;
26295 case ARM_FLOAT_ABI_HARD
:
26296 if (flags
& F_SOFT_FLOAT
)
26297 as_bad (_("hard-float conflicts with specified fpu"));
26301 /* Using pure-endian doubles (even if soft-float). */
26302 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
26303 flags
|= F_VFP_FLOAT
;
26305 #if defined OBJ_ELF
26306 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
26307 flags
|= EF_ARM_MAVERICK_FLOAT
;
26310 case EF_ARM_EABI_VER4
:
26311 case EF_ARM_EABI_VER5
:
26312 /* No additional flags to set. */
26319 bfd_set_private_flags (stdoutput
, flags
);
26321 /* We have run out flags in the COFF header to encode the
26322 status of ATPCS support, so instead we create a dummy,
26323 empty, debug section called .arm.atpcs. */
26328 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
26332 bfd_set_section_flags
26333 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
26334 bfd_set_section_size (stdoutput
, sec
, 0);
26335 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
26341 /* Record the CPU type as well. */
26342 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
26343 mach
= bfd_mach_arm_iWMMXt2
;
26344 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
26345 mach
= bfd_mach_arm_iWMMXt
;
26346 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
26347 mach
= bfd_mach_arm_XScale
;
26348 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
26349 mach
= bfd_mach_arm_ep9312
;
26350 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
26351 mach
= bfd_mach_arm_5TE
;
26352 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
26354 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
26355 mach
= bfd_mach_arm_5T
;
26357 mach
= bfd_mach_arm_5
;
26359 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
26361 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
26362 mach
= bfd_mach_arm_4T
;
26364 mach
= bfd_mach_arm_4
;
26366 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
26367 mach
= bfd_mach_arm_3M
;
26368 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
26369 mach
= bfd_mach_arm_3
;
26370 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
26371 mach
= bfd_mach_arm_2a
;
26372 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
26373 mach
= bfd_mach_arm_2
;
26375 mach
= bfd_mach_arm_unknown
;
26377 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
26380 /* Command line processing. */
26383 Invocation line includes a switch not recognized by the base assembler.
26384 See if it's a processor-specific option.
26386 This routine is somewhat complicated by the need for backwards
26387 compatibility (since older releases of gcc can't be changed).
26388 The new options try to make the interface as compatible as
26391 New options (supported) are:
26393 -mcpu=<cpu name> Assemble for selected processor
26394 -march=<architecture name> Assemble for selected architecture
26395 -mfpu=<fpu architecture> Assemble for selected FPU.
26396 -EB/-mbig-endian Big-endian
26397 -EL/-mlittle-endian Little-endian
26398 -k Generate PIC code
26399 -mthumb Start in Thumb mode
26400 -mthumb-interwork Code supports ARM/Thumb interworking
26402 -m[no-]warn-deprecated Warn about deprecated features
26403 -m[no-]warn-syms Warn when symbols match instructions
26405 For now we will also provide support for:
26407 -mapcs-32 32-bit Program counter
26408 -mapcs-26 26-bit Program counter
   -mapcs-float		  Floats passed in FP registers
26410 -mapcs-reentrant Reentrant code
26412 (sometime these will probably be replaced with -mapcs=<list of options>
26413 and -matpcs=<list of options>)
   The remaining options are only supported for backwards compatibility.
26416 Cpu variants, the arm part is optional:
26417 -m[arm]1 Currently not supported.
26418 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
26419 -m[arm]3 Arm 3 processor
26420 -m[arm]6[xx], Arm 6 processors
26421 -m[arm]7[xx][t][[d]m] Arm 7 processors
26422 -m[arm]8[10] Arm 8 processors
26423 -m[arm]9[20][tdmi] Arm 9 processors
26424 -mstrongarm[110[0]] StrongARM processors
26425 -mxscale XScale processors
26426 -m[arm]v[2345[t[e]]] Arm architectures
26427 -mall All (except the ARM1)
26429 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
26430 -mfpe-old (No float load/store multiples)
26431 -mvfpxd VFP Single precision
26433 -mno-fpu Disable all floating point instructions
26435 The following CPU names are recognized:
26436 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
26437 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
26438 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
26439 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
26440 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
26441 arm10t arm10e, arm1020t, arm1020e, arm10200e,
26442 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
26446 const char * md_shortopts
= "m:k";
26448 #ifdef ARM_BI_ENDIAN
26449 #define OPTION_EB (OPTION_MD_BASE + 0)
26450 #define OPTION_EL (OPTION_MD_BASE + 1)
26452 #if TARGET_BYTES_BIG_ENDIAN
26453 #define OPTION_EB (OPTION_MD_BASE + 0)
26455 #define OPTION_EL (OPTION_MD_BASE + 1)
26458 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
26459 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
26461 struct option md_longopts
[] =
26464 {"EB", no_argument
, NULL
, OPTION_EB
},
26467 {"EL", no_argument
, NULL
, OPTION_EL
},
26469 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
26471 {"fdpic", no_argument
, NULL
, OPTION_FDPIC
},
26473 {NULL
, no_argument
, NULL
, 0}
26476 size_t md_longopts_size
= sizeof (md_longopts
);
/* Table entry describing a simple on/off command-line option.	*/
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int *	       var;		/* Variable to change.	*/
  int	       value;		/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
26487 struct arm_option_table arm_opts
[] =
26489 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
26490 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
26491 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
26492 &support_interwork
, 1, NULL
},
26493 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
26494 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
26495 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
26497 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
26498 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
26499 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
26500 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
26503 /* These are recognized by the assembler, but have no affect on code. */
26504 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
26505 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
26507 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
26508 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
26509 &warn_on_deprecated
, 0, NULL
},
26510 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
26511 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
26512 {NULL
, NULL
, NULL
, 0, NULL
}
26515 struct arm_legacy_option_table
26517 const char * option
; /* Option name to match. */
26518 const arm_feature_set
** var
; /* Variable to change. */
26519 const arm_feature_set value
; /* What to change it to. */
26520 const char * deprecated
; /* If non-null, print this message. */
26523 const struct arm_legacy_option_table arm_legacy_opts
[] =
26525 /* DON'T add any new processors to this list -- we want the whole list
26526 to go away... Add them to the processors table instead. */
26527 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
26528 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
26529 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
26530 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
26531 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
26532 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
26533 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
26534 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
26535 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
26536 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
26537 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
26538 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
26539 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
26540 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
26541 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
26542 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
26543 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
26544 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
26545 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
26546 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
26547 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
26548 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
26549 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
26550 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
26551 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
26552 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
26553 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
26554 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
26555 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
26556 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
26557 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
26558 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
26559 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
26560 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
26561 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
26562 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
26563 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
26564 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
26565 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
26566 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
26567 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
26568 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
26569 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
26570 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
26571 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
26572 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
26573 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26574 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26575 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26576 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26577 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
26578 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
26579 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
26580 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
26581 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
26582 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
26583 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
26584 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
26585 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
26586 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
26587 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
26588 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
26589 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
26590 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
26591 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
26592 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
26593 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
26594 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
26595 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
26596 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
26597 N_("use -mcpu=strongarm110")},
26598 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
26599 N_("use -mcpu=strongarm1100")},
26600 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
26601 N_("use -mcpu=strongarm1110")},
26602 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
26603 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
26604 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
26606 /* Architecture variants -- don't add any more to this list either. */
26607 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
26608 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
26609 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
26610 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
26611 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
26612 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
26613 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
26614 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
26615 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
26616 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
26617 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
26618 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
26619 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
26620 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
26621 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
26622 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
26623 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
26624 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
26626 /* Floating point variants -- don't add any more to this list either. */
26627 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
26628 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
26629 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
26630 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
26631 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
26633 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
26636 struct arm_cpu_option_table
26640 const arm_feature_set value
;
26641 const arm_feature_set ext
;
26642 /* For some CPUs we assume an FPU unless the user explicitly sets
26644 const arm_feature_set default_fpu
;
26645 /* The canonical name of the CPU, or NULL to use NAME converted to upper
26647 const char * canonical_name
;
26650 /* This list should, at a minimum, contain all the cpu names
26651 recognized by GCC. */
26652 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
26654 static const struct arm_cpu_option_table arm_cpus
[] =
26656 ARM_CPU_OPT ("all", NULL
, ARM_ANY
,
26659 ARM_CPU_OPT ("arm1", NULL
, ARM_ARCH_V1
,
26662 ARM_CPU_OPT ("arm2", NULL
, ARM_ARCH_V2
,
26665 ARM_CPU_OPT ("arm250", NULL
, ARM_ARCH_V2S
,
26668 ARM_CPU_OPT ("arm3", NULL
, ARM_ARCH_V2S
,
26671 ARM_CPU_OPT ("arm6", NULL
, ARM_ARCH_V3
,
26674 ARM_CPU_OPT ("arm60", NULL
, ARM_ARCH_V3
,
26677 ARM_CPU_OPT ("arm600", NULL
, ARM_ARCH_V3
,
26680 ARM_CPU_OPT ("arm610", NULL
, ARM_ARCH_V3
,
26683 ARM_CPU_OPT ("arm620", NULL
, ARM_ARCH_V3
,
26686 ARM_CPU_OPT ("arm7", NULL
, ARM_ARCH_V3
,
26689 ARM_CPU_OPT ("arm7m", NULL
, ARM_ARCH_V3M
,
26692 ARM_CPU_OPT ("arm7d", NULL
, ARM_ARCH_V3
,
26695 ARM_CPU_OPT ("arm7dm", NULL
, ARM_ARCH_V3M
,
26698 ARM_CPU_OPT ("arm7di", NULL
, ARM_ARCH_V3
,
26701 ARM_CPU_OPT ("arm7dmi", NULL
, ARM_ARCH_V3M
,
26704 ARM_CPU_OPT ("arm70", NULL
, ARM_ARCH_V3
,
26707 ARM_CPU_OPT ("arm700", NULL
, ARM_ARCH_V3
,
26710 ARM_CPU_OPT ("arm700i", NULL
, ARM_ARCH_V3
,
26713 ARM_CPU_OPT ("arm710", NULL
, ARM_ARCH_V3
,
26716 ARM_CPU_OPT ("arm710t", NULL
, ARM_ARCH_V4T
,
26719 ARM_CPU_OPT ("arm720", NULL
, ARM_ARCH_V3
,
26722 ARM_CPU_OPT ("arm720t", NULL
, ARM_ARCH_V4T
,
26725 ARM_CPU_OPT ("arm740t", NULL
, ARM_ARCH_V4T
,
26728 ARM_CPU_OPT ("arm710c", NULL
, ARM_ARCH_V3
,
26731 ARM_CPU_OPT ("arm7100", NULL
, ARM_ARCH_V3
,
26734 ARM_CPU_OPT ("arm7500", NULL
, ARM_ARCH_V3
,
26737 ARM_CPU_OPT ("arm7500fe", NULL
, ARM_ARCH_V3
,
26740 ARM_CPU_OPT ("arm7t", NULL
, ARM_ARCH_V4T
,
26743 ARM_CPU_OPT ("arm7tdmi", NULL
, ARM_ARCH_V4T
,
26746 ARM_CPU_OPT ("arm7tdmi-s", NULL
, ARM_ARCH_V4T
,
26749 ARM_CPU_OPT ("arm8", NULL
, ARM_ARCH_V4
,
26752 ARM_CPU_OPT ("arm810", NULL
, ARM_ARCH_V4
,
26755 ARM_CPU_OPT ("strongarm", NULL
, ARM_ARCH_V4
,
26758 ARM_CPU_OPT ("strongarm1", NULL
, ARM_ARCH_V4
,
26761 ARM_CPU_OPT ("strongarm110", NULL
, ARM_ARCH_V4
,
26764 ARM_CPU_OPT ("strongarm1100", NULL
, ARM_ARCH_V4
,
26767 ARM_CPU_OPT ("strongarm1110", NULL
, ARM_ARCH_V4
,
26770 ARM_CPU_OPT ("arm9", NULL
, ARM_ARCH_V4T
,
26773 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T
,
26776 ARM_CPU_OPT ("arm920t", NULL
, ARM_ARCH_V4T
,
26779 ARM_CPU_OPT ("arm922t", NULL
, ARM_ARCH_V4T
,
26782 ARM_CPU_OPT ("arm940t", NULL
, ARM_ARCH_V4T
,
26785 ARM_CPU_OPT ("arm9tdmi", NULL
, ARM_ARCH_V4T
,
26788 ARM_CPU_OPT ("fa526", NULL
, ARM_ARCH_V4
,
26791 ARM_CPU_OPT ("fa626", NULL
, ARM_ARCH_V4
,
26795 /* For V5 or later processors we default to using VFP; but the user
26796 should really set the FPU type explicitly. */
26797 ARM_CPU_OPT ("arm9e-r0", NULL
, ARM_ARCH_V5TExP
,
26800 ARM_CPU_OPT ("arm9e", NULL
, ARM_ARCH_V5TE
,
26803 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
26806 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
26809 ARM_CPU_OPT ("arm926ej-s", NULL
, ARM_ARCH_V5TEJ
,
26812 ARM_CPU_OPT ("arm946e-r0", NULL
, ARM_ARCH_V5TExP
,
26815 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE
,
26818 ARM_CPU_OPT ("arm946e-s", NULL
, ARM_ARCH_V5TE
,
26821 ARM_CPU_OPT ("arm966e-r0", NULL
, ARM_ARCH_V5TExP
,
26824 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE
,
26827 ARM_CPU_OPT ("arm966e-s", NULL
, ARM_ARCH_V5TE
,
26830 ARM_CPU_OPT ("arm968e-s", NULL
, ARM_ARCH_V5TE
,
26833 ARM_CPU_OPT ("arm10t", NULL
, ARM_ARCH_V5T
,
26836 ARM_CPU_OPT ("arm10tdmi", NULL
, ARM_ARCH_V5T
,
26839 ARM_CPU_OPT ("arm10e", NULL
, ARM_ARCH_V5TE
,
26842 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE
,
26845 ARM_CPU_OPT ("arm1020t", NULL
, ARM_ARCH_V5T
,
26848 ARM_CPU_OPT ("arm1020e", NULL
, ARM_ARCH_V5TE
,
26851 ARM_CPU_OPT ("arm1022e", NULL
, ARM_ARCH_V5TE
,
26854 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ
,
26857 ARM_CPU_OPT ("arm1026ej-s", NULL
, ARM_ARCH_V5TEJ
,
26860 ARM_CPU_OPT ("fa606te", NULL
, ARM_ARCH_V5TE
,
26863 ARM_CPU_OPT ("fa616te", NULL
, ARM_ARCH_V5TE
,
26866 ARM_CPU_OPT ("fa626te", NULL
, ARM_ARCH_V5TE
,
26869 ARM_CPU_OPT ("fmp626", NULL
, ARM_ARCH_V5TE
,
26872 ARM_CPU_OPT ("fa726te", NULL
, ARM_ARCH_V5TE
,
26875 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6
,
26878 ARM_CPU_OPT ("arm1136j-s", NULL
, ARM_ARCH_V6
,
26881 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6
,
26884 ARM_CPU_OPT ("arm1136jf-s", NULL
, ARM_ARCH_V6
,
26887 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K
,
26890 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K
,
26893 ARM_CPU_OPT ("arm1156t2-s", NULL
, ARM_ARCH_V6T2
,
26896 ARM_CPU_OPT ("arm1156t2f-s", NULL
, ARM_ARCH_V6T2
,
26899 ARM_CPU_OPT ("arm1176jz-s", NULL
, ARM_ARCH_V6KZ
,
26902 ARM_CPU_OPT ("arm1176jzf-s", NULL
, ARM_ARCH_V6KZ
,
26905 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A
,
26906 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
26908 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE
,
26910 FPU_ARCH_NEON_VFP_V4
),
26911 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A
,
26912 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
26913 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
26914 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A
,
26915 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
26916 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
26917 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE
,
26919 FPU_ARCH_NEON_VFP_V4
),
26920 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE
,
26922 FPU_ARCH_NEON_VFP_V4
),
26923 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE
,
26925 FPU_ARCH_NEON_VFP_V4
),
26926 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A
,
26927 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26928 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26929 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A
,
26930 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26931 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26932 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A
,
26933 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26934 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26935 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A
,
26936 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26937 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26938 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A
,
26939 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26940 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26941 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A
,
26942 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26943 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26944 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A
,
26945 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26946 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26947 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A
,
26948 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26949 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26950 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A
,
26951 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26952 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26953 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A
,
26954 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26955 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26956 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R
,
26959 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R
,
26961 FPU_ARCH_VFP_V3D16
),
26962 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R
,
26963 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
26965 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R
,
26966 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
26967 FPU_ARCH_VFP_V3D16
),
26968 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R
,
26969 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
26970 FPU_ARCH_VFP_V3D16
),
26971 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R
,
26972 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26973 FPU_ARCH_NEON_VFP_ARMV8
),
26974 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN
,
26975 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
26977 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE
,
26980 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM
,
26983 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM
,
26986 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M
,
26989 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM
,
26992 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM
,
26995 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM
,
26998 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A
,
26999 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27000 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27001 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A
,
27002 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27003 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27004 /* ??? XSCALE is really an architecture. */
27005 ARM_CPU_OPT ("xscale", NULL
, ARM_ARCH_XSCALE
,
27009 /* ??? iwmmxt is not a processor. */
27010 ARM_CPU_OPT ("iwmmxt", NULL
, ARM_ARCH_IWMMXT
,
27013 ARM_CPU_OPT ("iwmmxt2", NULL
, ARM_ARCH_IWMMXT2
,
27016 ARM_CPU_OPT ("i80200", NULL
, ARM_ARCH_XSCALE
,
27021 ARM_CPU_OPT ("ep9312", "ARM920T",
27022 ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
27023 ARM_ARCH_NONE
, FPU_ARCH_MAVERICK
),
27025 /* Marvell processors. */
27026 ARM_CPU_OPT ("marvell-pj4", NULL
, ARM_ARCH_V7A
,
27027 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27028 FPU_ARCH_VFP_V3D16
),
27029 ARM_CPU_OPT ("marvell-whitney", NULL
, ARM_ARCH_V7A
,
27030 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27031 FPU_ARCH_NEON_VFP_V4
),
27033 /* APM X-Gene family. */
27034 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A
,
27036 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27037 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A
,
27038 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27039 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27041 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
27045 struct arm_ext_table
27049 const arm_feature_set merge
;
27050 const arm_feature_set clear
;
27053 struct arm_arch_option_table
27057 const arm_feature_set value
;
27058 const arm_feature_set default_fpu
;
27059 const struct arm_ext_table
* ext_table
;
/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }

/* Every FP feature bit except the endianness selector; removed by +nofp.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
27072 static const struct arm_ext_table armv5te_ext_table
[] =
27074 ARM_EXT ("fp", FPU_ARCH_VFP_V2
, ALL_FP
),
27075 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27078 static const struct arm_ext_table armv7_ext_table
[] =
27080 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27081 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27084 static const struct arm_ext_table armv7ve_ext_table
[] =
27086 ARM_EXT ("fp", FPU_ARCH_VFP_V4D16
, ALL_FP
),
27087 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
),
27088 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
27089 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27090 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
27091 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
), /* Alias for +fp. */
27092 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
27094 ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4
,
27095 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
27097 /* Aliases for +simd. */
27098 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
27100 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27101 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27102 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
27104 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27107 static const struct arm_ext_table armv7a_ext_table
[] =
27109 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27110 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
27111 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
27112 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27113 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
27114 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
),
27115 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
27117 ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1
,
27118 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
27120 /* Aliases for +simd. */
27121 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27122 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27124 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
27125 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
27127 ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
)),
27128 ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
)),
27129 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27132 static const struct arm_ext_table armv7r_ext_table
[] =
27134 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD
),
27135 ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD
), /* Alias for +fp.sp. */
27136 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27137 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
27138 ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
),
27139 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27140 ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27141 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
)),
27142 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27145 static const struct arm_ext_table armv7em_ext_table
[] =
27147 ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16
, ALL_FP
),
27148 /* Alias for +fp, used to be known as fpv4-sp-d16. */
27149 ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
),
27150 ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16
),
27151 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
27152 ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16
),
27153 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27156 static const struct arm_ext_table armv8a_ext_table
[] =
27158 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
27159 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
27160 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27161 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27163 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27164 should use the +simd option to turn on FP. */
27165 ARM_REMOVE ("fp", ALL_FP
),
27166 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27167 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27168 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27172 static const struct arm_ext_table armv81a_ext_table
[] =
27174 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
27175 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
27176 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27178 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27179 should use the +simd option to turn on FP. */
27180 ARM_REMOVE ("fp", ALL_FP
),
27181 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27182 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27183 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27186 static const struct arm_ext_table armv82a_ext_table
[] =
27188 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
27189 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16
),
27190 ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML
),
27191 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
27192 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27193 ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27195 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27196 should use the +simd option to turn on FP. */
27197 ARM_REMOVE ("fp", ALL_FP
),
27198 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27199 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27200 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27203 static const struct arm_ext_table armv84a_ext_table
[] =
27205 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27206 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
27207 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
27208 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27210 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27211 should use the +simd option to turn on FP. */
27212 ARM_REMOVE ("fp", ALL_FP
),
27213 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27214 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27215 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27218 static const struct arm_ext_table armv85a_ext_table
[] =
27220 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27221 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
27222 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
27223 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27225 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27226 should use the +simd option to turn on FP. */
27227 ARM_REMOVE ("fp", ALL_FP
),
27228 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27231 static const struct arm_ext_table armv8m_main_ext_table
[] =
27233 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27234 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
27235 ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16
, ALL_FP
),
27236 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
27237 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27240 static const struct arm_ext_table armv8_1m_main_ext_table
[] =
27242 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27243 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
27245 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27246 FPU_VFP_V5_SP_D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
),
27249 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27250 FPU_VFP_V5D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
27251 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27254 static const struct arm_ext_table armv8r_ext_table
[] =
27256 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
27257 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
27258 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27259 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27260 ARM_REMOVE ("fp", ALL_FP
),
27261 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16
),
27262 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27265 /* This list should, at a minimum, contain all the architecture names
27266 recognized by GCC. */
27267 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
27268 #define ARM_ARCH_OPT2(N, V, DF, ext) \
27269 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
27271 static const struct arm_arch_option_table arm_archs
[] =
27273 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
27274 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
27275 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
27276 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
27277 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
27278 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
27279 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
27280 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
27281 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
27282 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
27283 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
27284 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
27285 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
27286 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
27287 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
, armv5te
),
27288 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
, armv5te
),
27289 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
, armv5te
),
27290 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
27291 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
27292 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
, armv5te
),
27293 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
, armv5te
),
27294 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
27295 kept to preserve existing behaviour. */
27296 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
27297 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
27298 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
, armv5te
),
27299 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
, armv5te
),
27300 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
, armv5te
),
27301 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
27302 kept to preserve existing behaviour. */
27303 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
27304 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
27305 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
27306 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
27307 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
, armv7
),
27308 /* The official spelling of the ARMv7 profile variants is the dashed form.
27309 Accept the non-dashed form for compatibility with old toolchains. */
27310 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
27311 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
, armv7ve
),
27312 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
27313 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
27314 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
27315 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
27316 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
27317 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
, armv7em
),
27318 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
27319 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
,
27321 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN
, FPU_ARCH_VFP
,
27323 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
, armv8a
),
27324 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
, armv81a
),
27325 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
, armv82a
),
27326 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A
, FPU_ARCH_VFP
, armv82a
),
27327 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R
, FPU_ARCH_VFP
, armv8r
),
27328 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A
, FPU_ARCH_VFP
, armv84a
),
27329 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A
, FPU_ARCH_VFP
, armv85a
),
27330 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
27331 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
27332 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
, FPU_ARCH_VFP
),
27333 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
27335 #undef ARM_ARCH_OPT
27337 /* ISA extensions in the co-processor and main instruction set space. */
27339 struct arm_option_extension_value_table
27343 const arm_feature_set merge_value
;
27344 const arm_feature_set clear_value
;
27345 /* List of architectures for which an extension is available. ARM_ARCH_NONE
27346 indicates that an extension is available for all architectures while
27347 ARM_ANY marks an empty entry. */
27348 const arm_feature_set allowed_archs
[2];
27351 /* The following table must be in alphabetical order with a NULL last entry. */
27353 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
27354 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
27356 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
27357 use the context sensitive approach using arm_ext_table's. */
27358 static const struct arm_option_extension_value_table arm_extensions
[] =
27360 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27361 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27362 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27363 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
27364 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27365 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
,
27366 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
),
27368 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27369 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27370 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
27371 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
27372 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27373 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27374 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27376 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27377 | ARM_EXT2_FP16_FML
),
27378 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27379 | ARM_EXT2_FP16_FML
),
27381 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27382 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27383 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
27384 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
27385 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
27386 Thumb divide instruction. Due to this having the same name as the
27387 previous entry, this will be ignored when doing command-line parsing and
27388 only considered by build attribute selection code. */
27389 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
27390 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
27391 ARM_FEATURE_CORE_LOW (ARM_EXT_V7
)),
27392 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
27393 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
27394 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
27395 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
27396 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
27397 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
27398 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
27399 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
27400 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
27401 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
27402 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
27403 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
27404 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
27405 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
27406 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
27407 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27408 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
27409 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
27411 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
27412 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
27413 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27414 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
27415 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
27416 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27417 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
27418 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
27420 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
27421 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
27422 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
27423 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
27424 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
27425 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
27426 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27427 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
27429 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
27430 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
27431 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
27432 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
27433 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
27437 /* ISA floating-point and Advanced SIMD extensions. */
27438 struct arm_option_fpu_value_table
27441 const arm_feature_set value
;
27444 /* This list should, at a minimum, contain all the fpu names
27445 recognized by GCC. */
27446 static const struct arm_option_fpu_value_table arm_fpus
[] =
27448 {"softfpa", FPU_NONE
},
27449 {"fpe", FPU_ARCH_FPE
},
27450 {"fpe2", FPU_ARCH_FPE
},
27451 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
27452 {"fpa", FPU_ARCH_FPA
},
27453 {"fpa10", FPU_ARCH_FPA
},
27454 {"fpa11", FPU_ARCH_FPA
},
27455 {"arm7500fe", FPU_ARCH_FPA
},
27456 {"softvfp", FPU_ARCH_VFP
},
27457 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
27458 {"vfp", FPU_ARCH_VFP_V2
},
27459 {"vfp9", FPU_ARCH_VFP_V2
},
27460 {"vfp3", FPU_ARCH_VFP_V3
}, /* Undocumented, use vfpv3. */
27461 {"vfp10", FPU_ARCH_VFP_V2
},
27462 {"vfp10-r0", FPU_ARCH_VFP_V1
},
27463 {"vfpxd", FPU_ARCH_VFP_V1xD
},
27464 {"vfpv2", FPU_ARCH_VFP_V2
},
27465 {"vfpv3", FPU_ARCH_VFP_V3
},
27466 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
27467 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
27468 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
27469 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
27470 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
27471 {"arm1020t", FPU_ARCH_VFP_V1
},
27472 {"arm1020e", FPU_ARCH_VFP_V2
},
27473 {"arm1136jfs", FPU_ARCH_VFP_V2
}, /* Undocumented, use arm1136jf-s. */
27474 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
27475 {"maverick", FPU_ARCH_MAVERICK
},
27476 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
27477 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
27478 {"neon-fp16", FPU_ARCH_NEON_FP16
},
27479 {"vfpv4", FPU_ARCH_VFP_V4
},
27480 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
27481 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
27482 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
27483 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
27484 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
27485 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
27486 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
27487 {"crypto-neon-fp-armv8",
27488 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
27489 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
27490 {"crypto-neon-fp-armv8.1",
27491 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
27492 {NULL
, ARM_ARCH_NONE
}
27495 struct arm_option_value_table
27501 static const struct arm_option_value_table arm_float_abis
[] =
27503 {"hard", ARM_FLOAT_ABI_HARD
},
27504 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
27505 {"soft", ARM_FLOAT_ABI_SOFT
},
27510 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
27511 static const struct arm_option_value_table arm_eabis
[] =
27513 {"gnu", EF_ARM_EABI_UNKNOWN
},
27514 {"4", EF_ARM_EABI_VER4
},
27515 {"5", EF_ARM_EABI_VER5
},
/* One long (string-valued) command-line option and its handler.  */
struct arm_long_option_table
{
  const char * option;		/* Substring to match.  */
  const char * help;		/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
27529 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
27530 arm_feature_set
*ext_set
,
27531 const struct arm_ext_table
*ext_table
)
27533 /* We insist on extensions being specified in alphabetical order, and with
27534 extensions being added before being removed. We achieve this by having
27535 the global ARM_EXTENSIONS table in alphabetical order, and using the
27536 ADDING_VALUE variable to indicate whether we are adding an extension (1)
27537 or removing it (0) and only allowing it to change in the order
27539 const struct arm_option_extension_value_table
* opt
= NULL
;
27540 const arm_feature_set arm_any
= ARM_ANY
;
27541 int adding_value
= -1;
27543 while (str
!= NULL
&& *str
!= 0)
27550 as_bad (_("invalid architectural extension"));
27555 ext
= strchr (str
, '+');
27560 len
= strlen (str
);
27562 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
27564 if (adding_value
!= 0)
27567 opt
= arm_extensions
;
27575 if (adding_value
== -1)
27578 opt
= arm_extensions
;
27580 else if (adding_value
!= 1)
27582 as_bad (_("must specify extensions to add before specifying "
27583 "those to remove"));
27590 as_bad (_("missing architectural extension"));
27594 gas_assert (adding_value
!= -1);
27595 gas_assert (opt
!= NULL
);
27597 if (ext_table
!= NULL
)
27599 const struct arm_ext_table
* ext_opt
= ext_table
;
27600 bfd_boolean found
= FALSE
;
27601 for (; ext_opt
->name
!= NULL
; ext_opt
++)
27602 if (ext_opt
->name_len
== len
27603 && strncmp (ext_opt
->name
, str
, len
) == 0)
27607 if (ARM_FEATURE_ZERO (ext_opt
->merge
))
27608 /* TODO: Option not supported. When we remove the
27609 legacy table this case should error out. */
27612 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, ext_opt
->merge
);
27616 if (ARM_FEATURE_ZERO (ext_opt
->clear
))
27617 /* TODO: Option not supported. When we remove the
27618 legacy table this case should error out. */
27620 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, ext_opt
->clear
);
27632 /* Scan over the options table trying to find an exact match. */
27633 for (; opt
->name
!= NULL
; opt
++)
27634 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27636 int i
, nb_allowed_archs
=
27637 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
27638 /* Check we can apply the extension to this architecture. */
27639 for (i
= 0; i
< nb_allowed_archs
; i
++)
27642 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
27644 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
27647 if (i
== nb_allowed_archs
)
27649 as_bad (_("extension does not apply to the base architecture"));
27653 /* Add or remove the extension. */
27655 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
27657 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
27659 /* Allowing Thumb division instructions for ARMv7 in autodetection
27660 rely on this break so that duplicate extensions (extensions
27661 with the same name as a previous extension in the list) are not
27662 considered for command-line parsing. */
27666 if (opt
->name
== NULL
)
27668 /* Did we fail to find an extension because it wasn't specified in
27669 alphabetical order, or because it does not exist? */
27671 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
27672 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27675 if (opt
->name
== NULL
)
27676 as_bad (_("unknown architectural extension `%s'"), str
);
27678 as_bad (_("architectural extensions must be specified in "
27679 "alphabetical order"));
27685 /* We should skip the extension we've just matched the next time
27697 arm_parse_cpu (const char *str
)
27699 const struct arm_cpu_option_table
*opt
;
27700 const char *ext
= strchr (str
, '+');
27706 len
= strlen (str
);
27710 as_bad (_("missing cpu name `%s'"), str
);
27714 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
27715 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27717 mcpu_cpu_opt
= &opt
->value
;
27718 if (mcpu_ext_opt
== NULL
)
27719 mcpu_ext_opt
= XNEW (arm_feature_set
);
27720 *mcpu_ext_opt
= opt
->ext
;
27721 mcpu_fpu_opt
= &opt
->default_fpu
;
27722 if (opt
->canonical_name
)
27724 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
27725 strcpy (selected_cpu_name
, opt
->canonical_name
);
27731 if (len
>= sizeof selected_cpu_name
)
27732 len
= (sizeof selected_cpu_name
) - 1;
27734 for (i
= 0; i
< len
; i
++)
27735 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
27736 selected_cpu_name
[i
] = 0;
27740 return arm_parse_extension (ext
, mcpu_cpu_opt
, mcpu_ext_opt
, NULL
);
27745 as_bad (_("unknown cpu `%s'"), str
);
27750 arm_parse_arch (const char *str
)
27752 const struct arm_arch_option_table
*opt
;
27753 const char *ext
= strchr (str
, '+');
27759 len
= strlen (str
);
27763 as_bad (_("missing architecture name `%s'"), str
);
27767 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
27768 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27770 march_cpu_opt
= &opt
->value
;
27771 if (march_ext_opt
== NULL
)
27772 march_ext_opt
= XNEW (arm_feature_set
);
27773 *march_ext_opt
= arm_arch_none
;
27774 march_fpu_opt
= &opt
->default_fpu
;
27775 strcpy (selected_cpu_name
, opt
->name
);
27778 return arm_parse_extension (ext
, march_cpu_opt
, march_ext_opt
,
27784 as_bad (_("unknown architecture `%s'\n"), str
);
27789 arm_parse_fpu (const char * str
)
27791 const struct arm_option_fpu_value_table
* opt
;
27793 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
27794 if (streq (opt
->name
, str
))
27796 mfpu_opt
= &opt
->value
;
27800 as_bad (_("unknown floating point format `%s'\n"), str
);
27805 arm_parse_float_abi (const char * str
)
27807 const struct arm_option_value_table
* opt
;
27809 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
27810 if (streq (opt
->name
, str
))
27812 mfloat_abi_opt
= opt
->value
;
27816 as_bad (_("unknown floating point abi `%s'\n"), str
);
27822 arm_parse_eabi (const char * str
)
27824 const struct arm_option_value_table
*opt
;
27826 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
27827 if (streq (opt
->name
, str
))
27829 meabi_flags
= opt
->value
;
27832 as_bad (_("unknown EABI `%s'\n"), str
);
27838 arm_parse_it_mode (const char * str
)
27840 bfd_boolean ret
= TRUE
;
27842 if (streq ("arm", str
))
27843 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
27844 else if (streq ("thumb", str
))
27845 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
27846 else if (streq ("always", str
))
27847 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
27848 else if (streq ("never", str
))
27849 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
27852 as_bad (_("unknown implicit IT mode `%s', should be "\
27853 "arm, thumb, always, or never."), str
);
27861 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
27863 codecomposer_syntax
= TRUE
;
27864 arm_comment_chars
[0] = ';';
27865 arm_line_separator_chars
[0] = 0;
27869 struct arm_long_option_table arm_long_opts
[] =
27871 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
27872 arm_parse_cpu
, NULL
},
27873 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
27874 arm_parse_arch
, NULL
},
27875 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
27876 arm_parse_fpu
, NULL
},
27877 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
27878 arm_parse_float_abi
, NULL
},
27880 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
27881 arm_parse_eabi
, NULL
},
27883 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
27884 arm_parse_it_mode
, NULL
},
27885 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
27886 arm_ccs_mode
, NULL
},
27887 {NULL
, NULL
, 0, NULL
}
27891 md_parse_option (int c
, const char * arg
)
27893 struct arm_option_table
*opt
;
27894 const struct arm_legacy_option_table
*fopt
;
27895 struct arm_long_option_table
*lopt
;
27901 target_big_endian
= 1;
27907 target_big_endian
= 0;
27911 case OPTION_FIX_V4BX
:
27919 #endif /* OBJ_ELF */
27922 /* Listing option. Just ignore these, we don't support additional
27927 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
27929 if (c
== opt
->option
[0]
27930 && ((arg
== NULL
&& opt
->option
[1] == 0)
27931 || streq (arg
, opt
->option
+ 1)))
27933 /* If the option is deprecated, tell the user. */
27934 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
27935 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
27936 arg
? arg
: "", _(opt
->deprecated
));
27938 if (opt
->var
!= NULL
)
27939 *opt
->var
= opt
->value
;
27945 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
27947 if (c
== fopt
->option
[0]
27948 && ((arg
== NULL
&& fopt
->option
[1] == 0)
27949 || streq (arg
, fopt
->option
+ 1)))
27951 /* If the option is deprecated, tell the user. */
27952 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
27953 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
27954 arg
? arg
: "", _(fopt
->deprecated
));
27956 if (fopt
->var
!= NULL
)
27957 *fopt
->var
= &fopt
->value
;
27963 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
27965 /* These options are expected to have an argument. */
27966 if (c
== lopt
->option
[0]
27968 && strncmp (arg
, lopt
->option
+ 1,
27969 strlen (lopt
->option
+ 1)) == 0)
27971 /* If the option is deprecated, tell the user. */
27972 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
27973 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
27974 _(lopt
->deprecated
));
27976 /* Call the sup-option parser. */
27977 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
27988 md_show_usage (FILE * fp
)
27990 struct arm_option_table
*opt
;
27991 struct arm_long_option_table
*lopt
;
27993 fprintf (fp
, _(" ARM-specific assembler options:\n"));
27995 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
27996 if (opt
->help
!= NULL
)
27997 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
27999 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
28000 if (lopt
->help
!= NULL
)
28001 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
28005 -EB assemble code for a big-endian cpu\n"));
28010 -EL assemble code for a little-endian cpu\n"));
28014 --fix-v4bx Allow BX in ARMv4 code\n"));
28018 --fdpic generate an FDPIC object file\n"));
28019 #endif /* OBJ_ELF */
28027 arm_feature_set flags
;
28028 } cpu_arch_ver_table
;
28030 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
28031 chronologically for architectures, with an exception for ARMv6-M and
28032 ARMv6S-M due to legacy reasons. No new architecture should have a
28033 special case. This allows for build attribute selection results to be
28034 stable when new architectures are added. */
28035 static const cpu_arch_ver_table cpu_arch_ver
[] =
28037 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V1
},
28038 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2
},
28039 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2S
},
28040 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3
},
28041 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3M
},
28042 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4xM
},
28043 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4
},
28044 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4TxM
},
28045 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4T
},
28046 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5xM
},
28047 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5
},
28048 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5TxM
},
28049 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5T
},
28050 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TExP
},
28051 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TE
},
28052 {TAG_CPU_ARCH_V5TEJ
, ARM_ARCH_V5TEJ
},
28053 {TAG_CPU_ARCH_V6
, ARM_ARCH_V6
},
28054 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6Z
},
28055 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6KZ
},
28056 {TAG_CPU_ARCH_V6K
, ARM_ARCH_V6K
},
28057 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6T2
},
28058 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KT2
},
28059 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6ZT2
},
28060 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KZT2
},
28062 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
28063 always selected build attributes to match those of ARMv6-M
28064 (resp. ARMv6S-M). However, due to these architectures being a strict
28065 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
28066 would be selected when fully respecting chronology of architectures.
28067 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
28068 move them before ARMv7 architectures. */
28069 {TAG_CPU_ARCH_V6_M
, ARM_ARCH_V6M
},
28070 {TAG_CPU_ARCH_V6S_M
, ARM_ARCH_V6SM
},
28072 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7
},
28073 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7A
},
28074 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7R
},
28075 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7M
},
28076 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7VE
},
28077 {TAG_CPU_ARCH_V7E_M
, ARM_ARCH_V7EM
},
28078 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8A
},
28079 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_1A
},
28080 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_2A
},
28081 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_3A
},
28082 {TAG_CPU_ARCH_V8M_BASE
, ARM_ARCH_V8M_BASE
},
28083 {TAG_CPU_ARCH_V8M_MAIN
, ARM_ARCH_V8M_MAIN
},
28084 {TAG_CPU_ARCH_V8R
, ARM_ARCH_V8R
},
28085 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_4A
},
28086 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_5A
},
28087 {TAG_CPU_ARCH_V8_1M_MAIN
, ARM_ARCH_V8_1M_MAIN
},
28088 {-1, ARM_ARCH_NONE
}
28091 /* Set an attribute if it has not already been set by the user. */
28094 aeabi_set_attribute_int (int tag
, int value
)
28097 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
28098 || !attributes_set_explicitly
[tag
])
28099 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
28103 aeabi_set_attribute_string (int tag
, const char *value
)
28106 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
28107 || !attributes_set_explicitly
[tag
])
28108 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
28111 /* Return whether features in the *NEEDED feature set are available via
28112 extensions for the architecture whose feature set is *ARCH_FSET. */
28115 have_ext_for_needed_feat_p (const arm_feature_set
*arch_fset
,
28116 const arm_feature_set
*needed
)
28118 int i
, nb_allowed_archs
;
28119 arm_feature_set ext_fset
;
28120 const struct arm_option_extension_value_table
*opt
;
28122 ext_fset
= arm_arch_none
;
28123 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28125 /* Extension does not provide any feature we need. */
28126 if (!ARM_CPU_HAS_FEATURE (*needed
, opt
->merge_value
))
28130 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
28131 for (i
= 0; i
< nb_allowed_archs
; i
++)
28134 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_arch_any
))
28137 /* Extension is available, add it. */
28138 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *arch_fset
))
28139 ARM_MERGE_FEATURE_SETS (ext_fset
, ext_fset
, opt
->merge_value
);
28143 /* Can we enable all features in *needed? */
28144 return ARM_FSET_CPU_SUBSET (*needed
, ext_fset
);
28147 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
28148 a given architecture feature set *ARCH_EXT_FSET including extension feature
28149 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
28150 - if true, check for an exact match of the architecture modulo extensions;
28151 - otherwise, select build attribute value of the first superset
28152 architecture released so that results remains stable when new architectures
28154 For -march/-mcpu=all the build attribute value of the most featureful
28155 architecture is returned. Tag_CPU_arch_profile result is returned in
28159 get_aeabi_cpu_arch_from_fset (const arm_feature_set
*arch_ext_fset
,
28160 const arm_feature_set
*ext_fset
,
28161 char *profile
, int exact_match
)
28163 arm_feature_set arch_fset
;
28164 const cpu_arch_ver_table
*p_ver
, *p_ver_ret
= NULL
;
28166 /* Select most featureful architecture with all its extensions if building
28167 for -march=all as the feature sets used to set build attributes. */
28168 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, arm_arch_any
))
28170 /* Force revisiting of decision for each new architecture. */
28171 gas_assert (MAX_TAG_CPU_ARCH
<= TAG_CPU_ARCH_V8_1M_MAIN
);
28173 return TAG_CPU_ARCH_V8
;
28176 ARM_CLEAR_FEATURE (arch_fset
, *arch_ext_fset
, *ext_fset
);
28178 for (p_ver
= cpu_arch_ver
; p_ver
->val
!= -1; p_ver
++)
28180 arm_feature_set known_arch_fset
;
28182 ARM_CLEAR_FEATURE (known_arch_fset
, p_ver
->flags
, fpu_any
);
28185 /* Base architecture match user-specified architecture and
28186 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
28187 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, known_arch_fset
))
28192 /* Base architecture match user-specified architecture only
28193 (eg. ARMv6-M in the same case as above). Record it in case we
28194 find a match with above condition. */
28195 else if (p_ver_ret
== NULL
28196 && ARM_FEATURE_EQUAL (arch_fset
, known_arch_fset
))
28202 /* Architecture has all features wanted. */
28203 if (ARM_FSET_CPU_SUBSET (arch_fset
, known_arch_fset
))
28205 arm_feature_set added_fset
;
28207 /* Compute features added by this architecture over the one
28208 recorded in p_ver_ret. */
28209 if (p_ver_ret
!= NULL
)
28210 ARM_CLEAR_FEATURE (added_fset
, known_arch_fset
,
28212 /* First architecture that match incl. with extensions, or the
28213 only difference in features over the recorded match is
28214 features that were optional and are now mandatory. */
28215 if (p_ver_ret
== NULL
28216 || ARM_FSET_CPU_SUBSET (added_fset
, arch_fset
))
28222 else if (p_ver_ret
== NULL
)
28224 arm_feature_set needed_ext_fset
;
28226 ARM_CLEAR_FEATURE (needed_ext_fset
, arch_fset
, known_arch_fset
);
28228 /* Architecture has all features needed when using some
28229 extensions. Record it and continue searching in case there
28230 exist an architecture providing all needed features without
28231 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
28233 if (have_ext_for_needed_feat_p (&known_arch_fset
,
28240 if (p_ver_ret
== NULL
)
28244 /* Tag_CPU_arch_profile. */
28245 if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7a
)
28246 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8
)
28247 || (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_atomics
)
28248 && !ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8m_m_only
)))
28250 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7r
))
28252 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_m
))
28256 return p_ver_ret
->val
;
28259 /* Set the public EABI object attributes. */
28262 aeabi_set_public_attributes (void)
28264 char profile
= '\0';
28267 int fp16_optional
= 0;
28268 int skip_exact_match
= 0;
28269 arm_feature_set flags
, flags_arch
, flags_ext
;
28271 /* Autodetection mode, choose the architecture based the instructions
28273 if (no_cpu_selected ())
28275 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
28277 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
28278 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
28280 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
28281 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
28283 /* Code run during relaxation relies on selected_cpu being set. */
28284 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
28285 flags_ext
= arm_arch_none
;
28286 ARM_CLEAR_FEATURE (selected_arch
, flags_arch
, flags_ext
);
28287 selected_ext
= flags_ext
;
28288 selected_cpu
= flags
;
28290 /* Otherwise, choose the architecture based on the capabilities of the
28294 ARM_MERGE_FEATURE_SETS (flags_arch
, selected_arch
, selected_ext
);
28295 ARM_CLEAR_FEATURE (flags_arch
, flags_arch
, fpu_any
);
28296 flags_ext
= selected_ext
;
28297 flags
= selected_cpu
;
28299 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_fpu
);
28301 /* Allow the user to override the reported architecture. */
28302 if (!ARM_FEATURE_ZERO (selected_object_arch
))
28304 ARM_CLEAR_FEATURE (flags_arch
, selected_object_arch
, fpu_any
);
28305 flags_ext
= arm_arch_none
;
28308 skip_exact_match
= ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_any
);
28310 /* When this function is run again after relaxation has happened there is no
28311 way to determine whether an architecture or CPU was specified by the user:
28312 - selected_cpu is set above for relaxation to work;
28313 - march_cpu_opt is not set if only -mcpu or .cpu is used;
28314 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
28315 Therefore, if not in -march=all case we first try an exact match and fall
28316 back to autodetection. */
28317 if (!skip_exact_match
)
28318 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 1);
28320 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 0);
28322 as_bad (_("no architecture contains all the instructions used\n"));
28324 /* Tag_CPU_name. */
28325 if (selected_cpu_name
[0])
28329 q
= selected_cpu_name
;
28330 if (strncmp (q
, "armv", 4) == 0)
28335 for (i
= 0; q
[i
]; i
++)
28336 q
[i
] = TOUPPER (q
[i
]);
28338 aeabi_set_attribute_string (Tag_CPU_name
, q
);
28341 /* Tag_CPU_arch. */
28342 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
28344 /* Tag_CPU_arch_profile. */
28345 if (profile
!= '\0')
28346 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
28348 /* Tag_DSP_extension. */
28349 if (ARM_CPU_HAS_FEATURE (selected_ext
, arm_ext_dsp
))
28350 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
28352 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
28353 /* Tag_ARM_ISA_use. */
28354 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
28355 || ARM_FEATURE_ZERO (flags_arch
))
28356 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
28358 /* Tag_THUMB_ISA_use. */
28359 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
28360 || ARM_FEATURE_ZERO (flags_arch
))
28364 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
28365 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
28367 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
28371 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
28374 /* Tag_VFP_arch. */
28375 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
28376 aeabi_set_attribute_int (Tag_VFP_arch
,
28377 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
28379 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
28380 aeabi_set_attribute_int (Tag_VFP_arch
,
28381 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
28383 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
28386 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
28388 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
28390 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
28393 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
28394 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
28395 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
28396 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
28397 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
28399 /* Tag_ABI_HardFP_use. */
28400 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
28401 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
28402 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
28404 /* Tag_WMMX_arch. */
28405 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
28406 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
28407 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
28408 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
28410 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
28411 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
28412 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
28413 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
28414 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
28415 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
28417 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
28419 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
28423 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
28428 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
28429 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
28430 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
28434 We set Tag_DIV_use to two when integer divide instructions have been used
28435 in ARM state, or when Thumb integer divide instructions have been used,
28436 but we have no architecture profile set, nor have we any ARM instructions.
28438 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
28439 by the base architecture.
28441 For new architectures we will have to check these tests. */
28442 gas_assert (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
28443 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
28444 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
28445 aeabi_set_attribute_int (Tag_DIV_use
, 0);
28446 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
28447 || (profile
== '\0'
28448 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
28449 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
28450 aeabi_set_attribute_int (Tag_DIV_use
, 2);
28452 /* Tag_MP_extension_use. */
28453 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
28454 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
28456 /* Tag Virtualization_use. */
28457 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
28459 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
28462 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
28465 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
28466 finished and free extension feature bits which will not be used anymore. */
28469 arm_md_post_relax (void)
28471 aeabi_set_public_attributes ();
28472 XDELETE (mcpu_ext_opt
);
28473 mcpu_ext_opt
= NULL
;
28474 XDELETE (march_ext_opt
);
28475 march_ext_opt
= NULL
;
28478 /* Add the default contents for the .ARM.attributes section. */
28483 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
28486 aeabi_set_public_attributes ();
28488 #endif /* OBJ_ELF */
28490 /* Parse a .cpu directive. */
28493 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
28495 const struct arm_cpu_option_table
*opt
;
28499 name
= input_line_pointer
;
28500 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28501 input_line_pointer
++;
28502 saved_char
= *input_line_pointer
;
28503 *input_line_pointer
= 0;
28505 /* Skip the first "all" entry. */
28506 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
28507 if (streq (opt
->name
, name
))
28509 selected_arch
= opt
->value
;
28510 selected_ext
= opt
->ext
;
28511 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
28512 if (opt
->canonical_name
)
28513 strcpy (selected_cpu_name
, opt
->canonical_name
);
28517 for (i
= 0; opt
->name
[i
]; i
++)
28518 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
28520 selected_cpu_name
[i
] = 0;
28522 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28524 *input_line_pointer
= saved_char
;
28525 demand_empty_rest_of_line ();
28528 as_bad (_("unknown cpu `%s'"), name
);
28529 *input_line_pointer
= saved_char
;
28530 ignore_rest_of_line ();
28533 /* Parse a .arch directive. */
28536 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
28538 const struct arm_arch_option_table
*opt
;
28542 name
= input_line_pointer
;
28543 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28544 input_line_pointer
++;
28545 saved_char
= *input_line_pointer
;
28546 *input_line_pointer
= 0;
28548 /* Skip the first "all" entry. */
28549 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
28550 if (streq (opt
->name
, name
))
28552 selected_arch
= opt
->value
;
28553 selected_ext
= arm_arch_none
;
28554 selected_cpu
= selected_arch
;
28555 strcpy (selected_cpu_name
, opt
->name
);
28556 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28557 *input_line_pointer
= saved_char
;
28558 demand_empty_rest_of_line ();
28562 as_bad (_("unknown architecture `%s'\n"), name
);
28563 *input_line_pointer
= saved_char
;
28564 ignore_rest_of_line ();
28567 /* Parse a .object_arch directive. */
28570 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
28572 const struct arm_arch_option_table
*opt
;
28576 name
= input_line_pointer
;
28577 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28578 input_line_pointer
++;
28579 saved_char
= *input_line_pointer
;
28580 *input_line_pointer
= 0;
28582 /* Skip the first "all" entry. */
28583 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
28584 if (streq (opt
->name
, name
))
28586 selected_object_arch
= opt
->value
;
28587 *input_line_pointer
= saved_char
;
28588 demand_empty_rest_of_line ();
28592 as_bad (_("unknown architecture `%s'\n"), name
);
28593 *input_line_pointer
= saved_char
;
28594 ignore_rest_of_line ();
28597 /* Parse a .arch_extension directive. */
28600 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
28602 const struct arm_option_extension_value_table
*opt
;
28605 int adding_value
= 1;
28607 name
= input_line_pointer
;
28608 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28609 input_line_pointer
++;
28610 saved_char
= *input_line_pointer
;
28611 *input_line_pointer
= 0;
28613 if (strlen (name
) >= 2
28614 && strncmp (name
, "no", 2) == 0)
28620 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28621 if (streq (opt
->name
, name
))
28623 int i
, nb_allowed_archs
=
28624 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
28625 for (i
= 0; i
< nb_allowed_archs
; i
++)
28628 if (ARM_CPU_IS_ANY (opt
->allowed_archs
[i
]))
28630 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], selected_arch
))
28634 if (i
== nb_allowed_archs
)
28636 as_bad (_("architectural extension `%s' is not allowed for the "
28637 "current base architecture"), name
);
28642 ARM_MERGE_FEATURE_SETS (selected_ext
, selected_ext
,
28645 ARM_CLEAR_FEATURE (selected_ext
, selected_ext
, opt
->clear_value
);
28647 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
28648 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28649 *input_line_pointer
= saved_char
;
28650 demand_empty_rest_of_line ();
28651 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
28652 on this return so that duplicate extensions (extensions with the
28653 same name as a previous extension in the list) are not considered
28654 for command-line parsing. */
28658 if (opt
->name
== NULL
)
28659 as_bad (_("unknown architecture extension `%s'\n"), name
);
28661 *input_line_pointer
= saved_char
;
28662 ignore_rest_of_line ();
28665 /* Parse a .fpu directive. */
28668 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
28670 const struct arm_option_fpu_value_table
*opt
;
28674 name
= input_line_pointer
;
28675 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28676 input_line_pointer
++;
28677 saved_char
= *input_line_pointer
;
28678 *input_line_pointer
= 0;
28680 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
28681 if (streq (opt
->name
, name
))
28683 selected_fpu
= opt
->value
;
28684 #ifndef CPU_DEFAULT
28685 if (no_cpu_selected ())
28686 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
28689 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28690 *input_line_pointer
= saved_char
;
28691 demand_empty_rest_of_line ();
28695 as_bad (_("unknown floating point format `%s'\n"), name
);
28696 *input_line_pointer
= saved_char
;
28697 ignore_rest_of_line ();
28700 /* Copy symbol information. */
28703 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
28705 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
28709 /* Given a symbolic attribute NAME, return the proper integer value.
28710 Returns -1 if the attribute is not known. */
28713 arm_convert_symbolic_attribute (const char *name
)
28715 static const struct
28720 attribute_table
[] =
28722 /* When you modify this table you should
28723 also modify the list in doc/c-arm.texi. */
28724 #define T(tag) {#tag, tag}
28725 T (Tag_CPU_raw_name
),
28728 T (Tag_CPU_arch_profile
),
28729 T (Tag_ARM_ISA_use
),
28730 T (Tag_THUMB_ISA_use
),
28734 T (Tag_Advanced_SIMD_arch
),
28735 T (Tag_PCS_config
),
28736 T (Tag_ABI_PCS_R9_use
),
28737 T (Tag_ABI_PCS_RW_data
),
28738 T (Tag_ABI_PCS_RO_data
),
28739 T (Tag_ABI_PCS_GOT_use
),
28740 T (Tag_ABI_PCS_wchar_t
),
28741 T (Tag_ABI_FP_rounding
),
28742 T (Tag_ABI_FP_denormal
),
28743 T (Tag_ABI_FP_exceptions
),
28744 T (Tag_ABI_FP_user_exceptions
),
28745 T (Tag_ABI_FP_number_model
),
28746 T (Tag_ABI_align_needed
),
28747 T (Tag_ABI_align8_needed
),
28748 T (Tag_ABI_align_preserved
),
28749 T (Tag_ABI_align8_preserved
),
28750 T (Tag_ABI_enum_size
),
28751 T (Tag_ABI_HardFP_use
),
28752 T (Tag_ABI_VFP_args
),
28753 T (Tag_ABI_WMMX_args
),
28754 T (Tag_ABI_optimization_goals
),
28755 T (Tag_ABI_FP_optimization_goals
),
28756 T (Tag_compatibility
),
28757 T (Tag_CPU_unaligned_access
),
28758 T (Tag_FP_HP_extension
),
28759 T (Tag_VFP_HP_extension
),
28760 T (Tag_ABI_FP_16bit_format
),
28761 T (Tag_MPextension_use
),
28763 T (Tag_nodefaults
),
28764 T (Tag_also_compatible_with
),
28765 T (Tag_conformance
),
28767 T (Tag_Virtualization_use
),
28768 T (Tag_DSP_extension
),
28769 /* We deliberately do not include Tag_MPextension_use_legacy. */
28777 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
28778 if (streq (name
, attribute_table
[i
].name
))
28779 return attribute_table
[i
].tag
;
28784 /* Apply sym value for relocations only in the case that they are for
28785 local symbols in the same segment as the fixup and you have the
28786 respective architectural feature for blx and simple switches. */
28789 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
28792 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
28793 /* PR 17444: If the local symbol is in a different section then a reloc
28794 will always be generated for it, so applying the symbol value now
28795 will result in a double offset being stored in the relocation. */
28796 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
28797 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
28799 switch (fixP
->fx_r_type
)
28801 case BFD_RELOC_ARM_PCREL_BLX
:
28802 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
28803 if (ARM_IS_FUNC (fixP
->fx_addsy
))
28807 case BFD_RELOC_ARM_PCREL_CALL
:
28808 case BFD_RELOC_THUMB_PCREL_BLX
:
28809 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
28820 #endif /* OBJ_ELF */