1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2015 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS
,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result
;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
124 #define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant
;
127 static arm_feature_set arm_arch_used
;
128 static arm_feature_set thumb_arch_used
;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26
= FALSE
;
132 static int atpcs
= FALSE
;
133 static int support_interwork
= FALSE
;
134 static int uses_apcs_float
= FALSE
;
135 static int pic_code
= FALSE
;
136 static int fix_v4bx
= FALSE
;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated
= TRUE
;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
154 static const arm_feature_set
*object_arch
= NULL
;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
158 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
159 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
160 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
161 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
162 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
163 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
164 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
165 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
168 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
171 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
172 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
173 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
174 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
175 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
176 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
177 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
178 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
179 static const arm_feature_set arm_ext_v4t_5
=
180 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
181 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
182 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
183 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
184 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
185 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
186 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
187 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
188 static const arm_feature_set arm_ext_v6m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
);
189 static const arm_feature_set arm_ext_v6_notm
=
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
191 static const arm_feature_set arm_ext_v6_dsp
=
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
193 static const arm_feature_set arm_ext_barrier
=
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
195 static const arm_feature_set arm_ext_msr
=
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
197 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
198 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
199 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
200 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
201 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
202 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
203 static const arm_feature_set arm_ext_m
=
204 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
| ARM_EXT_OS
| ARM_EXT_V7M
);
205 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
206 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
207 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
208 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
209 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
211 static const arm_feature_set arm_arch_any
= ARM_ANY
;
212 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1, -1);
213 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
214 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
215 static const arm_feature_set arm_arch_v6m_only
= ARM_ARCH_V6M_ONLY
;
217 static const arm_feature_set arm_cext_iwmmxt2
=
218 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
219 static const arm_feature_set arm_cext_iwmmxt
=
220 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
221 static const arm_feature_set arm_cext_xscale
=
222 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
223 static const arm_feature_set arm_cext_maverick
=
224 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
225 static const arm_feature_set fpu_fpa_ext_v1
=
226 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
227 static const arm_feature_set fpu_fpa_ext_v2
=
228 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
229 static const arm_feature_set fpu_vfp_ext_v1xd
=
230 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
231 static const arm_feature_set fpu_vfp_ext_v1
=
232 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
233 static const arm_feature_set fpu_vfp_ext_v2
=
234 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
235 static const arm_feature_set fpu_vfp_ext_v3xd
=
236 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
237 static const arm_feature_set fpu_vfp_ext_v3
=
238 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
239 static const arm_feature_set fpu_vfp_ext_d32
=
240 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
241 static const arm_feature_set fpu_neon_ext_v1
=
242 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
243 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
244 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
245 static const arm_feature_set fpu_vfp_fp16
=
246 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
247 static const arm_feature_set fpu_neon_ext_fma
=
248 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
249 static const arm_feature_set fpu_vfp_ext_fma
=
250 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
251 static const arm_feature_set fpu_vfp_ext_armv8
=
252 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
253 static const arm_feature_set fpu_vfp_ext_armv8xd
=
254 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
255 static const arm_feature_set fpu_neon_ext_armv8
=
256 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
257 static const arm_feature_set fpu_crypto_ext_armv8
=
258 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
259 static const arm_feature_set crc_ext_armv8
=
260 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
262 static int mfloat_abi_opt
= -1;
263 /* Record user cpu selection for object attributes. */
264 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
265 /* Must be long enough to hold any of the names in arm_cpus. */
266 static char selected_cpu_name
[16];
268 extern FLONUM_TYPE generic_floating_point_number
;
/* Return TRUE if no CPU was selected on the command line: selected_cpu
   still holds its ARM_ARCH_NONE initial value, i.e. neither -mcpu= nor
   -march= named a CPU.  */
no_cpu_selected (void)
  return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
279 static int meabi_flags
= EABI_DEFAULT
;
281 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
284 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
289 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
294 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
295 symbolS
* GOT_symbol
;
298 /* 0: assemble for ARM,
299 1: assemble for Thumb,
300 2: assemble for Thumb even though target CPU does not support thumb
302 static int thumb_mode
= 0;
303 /* A value distinct from the possible values for thumb_mode that we
304 can use to record whether thumb_mode has been copied into the
305 tc_frag_data field of a frag. */
306 #define MODE_RECORDED (1 << 4)
308 /* Specifies the intrinsic IT insn behavior mode. */
309 enum implicit_it_mode
311 IMPLICIT_IT_MODE_NEVER
= 0x00,
312 IMPLICIT_IT_MODE_ARM
= 0x01,
313 IMPLICIT_IT_MODE_THUMB
= 0x02,
314 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
316 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
318 /* If unified_syntax is true, we are processing the new unified
319 ARM/Thumb syntax. Important differences from the old ARM mode:
321 - Immediate operands do not require a # prefix.
322 - Conditional affixes always appear at the end of the
323 instruction. (For backward compatibility, those instructions
324 that formerly had them in the middle, continue to accept them
326 - The IT instruction may appear, and if it does is validated
327 against subsequent conditional affixes. It does not generate
330 Important differences from the old Thumb mode:
332 - Immediate operands do not require a # prefix.
333 - Most of the V6T2 instructions are only available in unified mode.
334 - The .N and .W suffixes are recognized and honored (it is an error
335 if they cannot be honored).
336 - All instructions set the flags if and only if they have an 's' affix.
337 - Conditional affixes may be used. They are validated against
338 preceding IT instructions. Unlike ARM mode, you cannot use a
339 conditional affix except in the scope of an IT instruction. */
341 static bfd_boolean unified_syntax
= FALSE
;
343 /* An immediate operand can start with #, and ld*, st*, pld operands
344 can contain [ and ]. We need to tell APP not to elide whitespace
345 before a [, which can appear as the first operand for pld.
346 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
347 const char arm_symbol_chars
[] = "#[]{}";
362 enum neon_el_type type
;
366 #define NEON_MAX_TYPE_ELS 4
370 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
374 enum it_instruction_type
379 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
380 if inside, should be the last one. */
381 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
382 i.e. BKPT and NOP. */
383 IT_INSN
/* The IT insn has been parsed. */
386 /* The maximum number of operands we need. */
387 #define ARM_IT_MAX_OPERANDS 6
392 unsigned long instruction
;
396 /* "uncond_value" is set to the value in place of the conditional field in
397 unconditional versions of the instruction, or -1 if nothing is
400 struct neon_type vectype
;
401 /* This does not indicate an actual NEON instruction, only that
402 the mnemonic accepts neon-style type suffixes. */
404 /* Set to the opcode if the instruction needs relaxation.
405 Zero if the instruction is not relaxed. */
409 bfd_reloc_code_real_type type
;
414 enum it_instruction_type it_insn_type
;
420 struct neon_type_el vectype
;
421 unsigned present
: 1; /* Operand present. */
422 unsigned isreg
: 1; /* Operand was a register. */
423 unsigned immisreg
: 1; /* .imm field is a second register. */
424 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
425 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
426 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
427 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
428 instructions. This allows us to disambiguate ARM <-> vector insns. */
429 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
430 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
431 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
432 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
433 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
434 unsigned writeback
: 1; /* Operand has trailing ! */
435 unsigned preind
: 1; /* Preindexed address. */
436 unsigned postind
: 1; /* Postindexed address. */
437 unsigned negative
: 1; /* Index register was negated. */
438 unsigned shifted
: 1; /* Shift applied to operation. */
439 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
440 } operands
[ARM_IT_MAX_OPERANDS
];
443 static struct arm_it inst
;
445 #define NUM_FLOAT_VALS 8
447 const char * fp_const
[] =
449 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
452 /* Number of littlenums required to hold an extended precision number. */
453 #define MAX_LITTLENUMS 6
455 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
465 #define CP_T_X 0x00008000
466 #define CP_T_Y 0x00400000
468 #define CONDS_BIT 0x00100000
469 #define LOAD_BIT 0x00100000
471 #define DOUBLE_LOAD_FLAG 0x00000001
475 const char * template_name
;
479 #define COND_ALWAYS 0xE
483 const char * template_name
;
487 struct asm_barrier_opt
489 const char * template_name
;
491 const arm_feature_set arch
;
494 /* The bit that distinguishes CPSR and SPSR. */
495 #define SPSR_BIT (1 << 22)
497 /* The individual PSR flag bits. */
498 #define PSR_c (1 << 16)
499 #define PSR_x (1 << 17)
500 #define PSR_s (1 << 18)
501 #define PSR_f (1 << 19)
506 bfd_reloc_code_real_type reloc
;
511 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
512 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
517 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
520 /* Bits for DEFINED field in neon_typed_alias. */
521 #define NTA_HASTYPE 1
522 #define NTA_HASINDEX 2
524 struct neon_typed_alias
526 unsigned char defined
;
528 struct neon_type_el eltype
;
531 /* ARM register categories. This includes coprocessor numbers and various
532 architecture extensions' registers. */
559 /* Structure for a hash table entry for a register.
560 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
561 information which states whether a vector type or index is specified (for a
562 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
568 unsigned char builtin
;
569 struct neon_typed_alias
* neon
;
572 /* Diagnostics used when we don't get a register of the expected type. */
573 const char * const reg_expected_msgs
[] =
575 N_("ARM register expected"),
576 N_("bad or missing co-processor number"),
577 N_("co-processor register expected"),
578 N_("FPA register expected"),
579 N_("VFP single precision register expected"),
580 N_("VFP/Neon double precision register expected"),
581 N_("Neon quad precision register expected"),
582 N_("VFP single or double precision register expected"),
583 N_("Neon double or quad precision register expected"),
584 N_("VFP single, double or Neon quad precision register expected"),
585 N_("VFP system register expected"),
586 N_("Maverick MVF register expected"),
587 N_("Maverick MVD register expected"),
588 N_("Maverick MVFX register expected"),
589 N_("Maverick MVDX register expected"),
590 N_("Maverick MVAX register expected"),
591 N_("Maverick DSPSC register expected"),
592 N_("iWMMXt data register expected"),
593 N_("iWMMXt control register expected"),
594 N_("iWMMXt scalar register expected"),
595 N_("XScale accumulator register expected"),
598 /* Some well known registers that we refer to directly elsewhere. */
604 /* ARM instructions take 4bytes in the object file, Thumb instructions
610 /* Basic string to match. */
611 const char * template_name
;
613 /* Parameters to instruction. */
614 unsigned int operands
[8];
616 /* Conditional tag - see opcode_lookup. */
617 unsigned int tag
: 4;
619 /* Basic instruction code. */
620 unsigned int avalue
: 28;
622 /* Thumb-format instruction code. */
625 /* Which architecture variant provides this instruction. */
626 const arm_feature_set
* avariant
;
627 const arm_feature_set
* tvariant
;
629 /* Function to call to encode instruction in ARM format. */
630 void (* aencode
) (void);
632 /* Function to call to encode instruction in Thumb format. */
633 void (* tencode
) (void);
636 /* Defines for various bits that we will want to toggle. */
637 #define INST_IMMEDIATE 0x02000000
638 #define OFFSET_REG 0x02000000
639 #define HWOFFSET_IMM 0x00400000
640 #define SHIFT_BY_REG 0x00000010
641 #define PRE_INDEX 0x01000000
642 #define INDEX_UP 0x00800000
643 #define WRITE_BACK 0x00200000
644 #define LDM_TYPE_2_OR_3 0x00400000
645 #define CPSI_MMOD 0x00020000
647 #define LITERAL_MASK 0xf000f000
648 #define OPCODE_MASK 0xfe1fffff
649 #define V4_STR_BIT 0x00000020
650 #define VLDR_VMOV_SAME 0x0040f000
652 #define T2_SUBS_PC_LR 0xf3de8f00
654 #define DATA_OP_SHIFT 21
656 #define T2_OPCODE_MASK 0xfe1fffff
657 #define T2_DATA_OP_SHIFT 21
659 #define A_COND_MASK 0xf0000000
660 #define A_PUSH_POP_OP_MASK 0x0fff0000
662 /* Opcodes for pushing/poping registers to/from the stack. */
663 #define A1_OPCODE_PUSH 0x092d0000
664 #define A2_OPCODE_PUSH 0x052d0004
665 #define A2_OPCODE_POP 0x049d0004
667 /* Codes to distinguish the arithmetic instructions. */
678 #define OPCODE_CMP 10
679 #define OPCODE_CMN 11
680 #define OPCODE_ORR 12
681 #define OPCODE_MOV 13
682 #define OPCODE_BIC 14
683 #define OPCODE_MVN 15
685 #define T2_OPCODE_AND 0
686 #define T2_OPCODE_BIC 1
687 #define T2_OPCODE_ORR 2
688 #define T2_OPCODE_ORN 3
689 #define T2_OPCODE_EOR 4
690 #define T2_OPCODE_ADD 8
691 #define T2_OPCODE_ADC 10
692 #define T2_OPCODE_SBC 11
693 #define T2_OPCODE_SUB 13
694 #define T2_OPCODE_RSB 14
696 #define T_OPCODE_MUL 0x4340
697 #define T_OPCODE_TST 0x4200
698 #define T_OPCODE_CMN 0x42c0
699 #define T_OPCODE_NEG 0x4240
700 #define T_OPCODE_MVN 0x43c0
702 #define T_OPCODE_ADD_R3 0x1800
703 #define T_OPCODE_SUB_R3 0x1a00
704 #define T_OPCODE_ADD_HI 0x4400
705 #define T_OPCODE_ADD_ST 0xb000
706 #define T_OPCODE_SUB_ST 0xb080
707 #define T_OPCODE_ADD_SP 0xa800
708 #define T_OPCODE_ADD_PC 0xa000
709 #define T_OPCODE_ADD_I8 0x3000
710 #define T_OPCODE_SUB_I8 0x3800
711 #define T_OPCODE_ADD_I3 0x1c00
712 #define T_OPCODE_SUB_I3 0x1e00
714 #define T_OPCODE_ASR_R 0x4100
715 #define T_OPCODE_LSL_R 0x4080
716 #define T_OPCODE_LSR_R 0x40c0
717 #define T_OPCODE_ROR_R 0x41c0
718 #define T_OPCODE_ASR_I 0x1000
719 #define T_OPCODE_LSL_I 0x0000
720 #define T_OPCODE_LSR_I 0x0800
722 #define T_OPCODE_MOV_I8 0x2000
723 #define T_OPCODE_CMP_I8 0x2800
724 #define T_OPCODE_CMP_LR 0x4280
725 #define T_OPCODE_MOV_HR 0x4600
726 #define T_OPCODE_CMP_HR 0x4500
728 #define T_OPCODE_LDR_PC 0x4800
729 #define T_OPCODE_LDR_SP 0x9800
730 #define T_OPCODE_STR_SP 0x9000
731 #define T_OPCODE_LDR_IW 0x6800
732 #define T_OPCODE_STR_IW 0x6000
733 #define T_OPCODE_LDR_IH 0x8800
734 #define T_OPCODE_STR_IH 0x8000
735 #define T_OPCODE_LDR_IB 0x7800
736 #define T_OPCODE_STR_IB 0x7000
737 #define T_OPCODE_LDR_RW 0x5800
738 #define T_OPCODE_STR_RW 0x5000
739 #define T_OPCODE_LDR_RH 0x5a00
740 #define T_OPCODE_STR_RH 0x5200
741 #define T_OPCODE_LDR_RB 0x5c00
742 #define T_OPCODE_STR_RB 0x5400
744 #define T_OPCODE_PUSH 0xb400
745 #define T_OPCODE_POP 0xbc00
747 #define T_OPCODE_BRANCH 0xe000
749 #define THUMB_SIZE 2 /* Size of thumb instruction. */
750 #define THUMB_PP_PC_LR 0x0100
751 #define THUMB_LOAD_BIT 0x0800
752 #define THUMB2_LOAD_BIT 0x00100000
/* Diagnostic strings used when rejecting malformed instructions.  Each
   macro expands to a translated string *expression* (no trailing
   semicolon), so they may be used in any expression context, e.g.
   "inst.error = BAD_ARGS;" or as a conditional-expression operand.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Stray trailing ';' removed: it made this macro unusable in
   expression contexts, unlike every other macro in this group.  */
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
776 static struct hash_control
* arm_ops_hsh
;
777 static struct hash_control
* arm_cond_hsh
;
778 static struct hash_control
* arm_shift_hsh
;
779 static struct hash_control
* arm_psr_hsh
;
780 static struct hash_control
* arm_v7m_psr_hsh
;
781 static struct hash_control
* arm_reg_hsh
;
782 static struct hash_control
* arm_reloc_hsh
;
783 static struct hash_control
* arm_barrier_opt_hsh
;
785 /* Stuff needed to resolve the label ambiguity
794 symbolS
* last_label_seen
;
795 static int label_is_thumb_function_name
= FALSE
;
797 /* Literal pool structure. Held on a per-section
798 and per-sub-section basis. */
800 #define MAX_LITERAL_POOL_SIZE 1024
801 typedef struct literal_pool
803 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
804 unsigned int next_free_entry
;
810 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
812 struct literal_pool
* next
;
813 unsigned int alignment
;
816 /* Pointer to a linked list of literal pools. */
817 literal_pool
* list_of_pools
= NULL
;
819 typedef enum asmfunc_states
822 WAITING_ASMFUNC_NAME
,
826 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
829 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
831 static struct current_it now_it
;
/* Return nonzero if COND may execute under the current IT block's
   condition.  In the ARM condition-code encoding a condition and its
   inverse differ only in bit 0, so masking bit 0 off both values
   compares the underlying condition pair.  */
now_it_compatible (int cond)
  return (cond & ~1) == (now_it.cc & ~1);
/* Return nonzero if the instruction currently being assembled carries
   an explicit condition, i.e. its condition field is not AL.  */
conditional_insn (void)
  return inst.cond != COND_ALWAYS;
846 static int in_it_block (void);
848 static int handle_it_state (void);
850 static void force_automatic_it_block_close (void);
852 static void it_fsm_post_encode (void);
854 #define set_it_insn_type(type) \
857 inst.it_insn_type = type; \
858 if (handle_it_state () == FAIL) \
863 #define set_it_insn_type_nonvoid(type, failret) \
866 inst.it_insn_type = type; \
867 if (handle_it_state () == FAIL) \
872 #define set_it_insn_type_last() \
875 if (inst.cond == COND_ALWAYS) \
876 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
878 set_it_insn_type (INSIDE_IT_LAST_INSN); \
884 /* This array holds the chars that always start a comment. If the
885 pre-processor is disabled, these aren't very useful. */
886 char arm_comment_chars
[] = "@";
888 /* This array holds the chars that only start a comment at the beginning of
889 a line. If the line seems to have the form '# 123 filename'
890 .line and .file directives will appear in the pre-processed output. */
891 /* Note that input_file.c hand checks for '#' at the beginning of the
892 first line of the input file. This is because the compiler outputs
893 #NO_APP at the beginning of its output. */
894 /* Also note that comments like this one will always work. */
895 const char line_comment_chars
[] = "#";
897 char arm_line_separator_chars
[] = ";";
899 /* Chars that can be used to separate mant
900 from exp in floating point numbers. */
901 const char EXP_CHARS
[] = "eE";
903 /* Chars that mean this number is a floating point constant. */
907 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
909 /* Prefix characters that indicate the start of an immediate
911 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
913 /* Separator character handling. */
915 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
918 skip_past_char (char ** str
, char c
)
920 /* PR gas/14987: Allow for whitespace before the expected character. */
921 skip_whitespace (*str
);
932 #define skip_past_comma(str) skip_past_char (str, ',')
934 /* Arithmetic expressions (possibly involving symbols). */
/* Return TRUE if anything in the expression rooted at symbol SP is a
   bignum.  Recurses through both operand symbols of the symbol's value
   expression.  */
walk_no_bignums (symbolS * sp)
  /* A literal big number at this node.  */
  if (symbol_get_value_expression (sp)->X_op == O_big)
  /* Otherwise descend into the sub-expressions, when present.  */
  if (symbol_get_value_expression (sp)->X_add_symbol)
    return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
	    || (symbol_get_value_expression (sp)->X_op_symbol
		&& walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
954 static int in_my_get_expression
= 0;
956 /* Third argument to my_get_expression. */
957 #define GE_NO_PREFIX 0
958 #define GE_IMM_PREFIX 1
959 #define GE_OPT_PREFIX 2
960 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
961 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
962 #define GE_OPT_PREFIX_BIG 3
/* Parse an expression starting at *STR into EP, honouring PREFIX_MODE
   (one of the GE_* values defined above).  On failure inst.error is
   set; *STR and input_line_pointer are restored/advanced as
   appropriate on every exit path.  */
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
  /* In unified syntax, all prefixes are optional.  */
  prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
    case GE_NO_PREFIX: break;
      /* A '#' (or '$') prefix is mandatory in this mode.  */
      if (!is_immediate_prefix (**str))
	  inst.error = _("immediate expression requires a # prefix");
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
  memset (ep, 0, sizeof (expressionS));
  /* Point the generic expression parser at our string, remembering the
     old input_line_pointer so it can be restored afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;
  if (ep->X_op == O_illegal || ep->X_op == O_absent)
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* Keep any more specific error md_operand may already have set.  */
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
  /* Expressions must resolve against one of the well-known sections.  */
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  && walk_no_bignums (ep->X_op_symbol))))))
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
  /* Success: hand back the updated scan position.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represent in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
md_atof (int type, char * litP, int * sizeP)
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  /* TYPE did not name a float format we know.  */
  return _("Unrecognized or unsupported floating point constant");
  /* atof_ieee fills WORDS and returns the updated input pointer.  */
  t = atof_ieee (input_line_pointer, type, words);
  input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);
  /* Big endian: emit the littlenums in natural order.  */
  if (target_big_endian)
      for (i = 0; i < prec; i++)
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
      /* Pure-endian FPUs keep word order reversed relative to the
	 mixed-endian layout described above.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	for (i = prec - 1; i >= 0; i--)
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  This is the hook the generic
   expression parser calls for syntax it cannot resolve itself.  */
md_operand (expressionS * exp)
  /* Only mark the failure while my_get_expression is active; it turns
     the O_illegal marker into a diagnostic with the right context.  */
  if (in_my_get_expression)
    exp->X_op = O_illegal;
1149 /* Immediate values. */
/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */
immediate_for_directive (int *val)
  /* Mark the expression invalid until successfully parsed.  */
  exp.X_op = O_illegal;
  /* Skip an optional '#' / '$' immediate prefix.  */
  if (is_immediate_prefix (*input_line_pointer))
      input_line_pointer++;
  /* Anything that did not fold to a constant is rejected, and the rest
     of the directive line is discarded.  */
  if (exp.X_op != O_constant)
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
  *val = exp.X_add_number;
1178 /* Register parsing. */
1180 /* Generic register parser. CCP points to what should be the
1181 beginning of a register name. If it is indeed a valid register
1182 name, advance CCP over it and return the reg_entry structure;
1183 otherwise return NULL. Does not issue diagnostics. */
1185 static struct reg_entry
*
1186 arm_reg_parse_multi (char **ccp
)
1190 struct reg_entry
*reg
;
1192 skip_whitespace (start
);
1194 #ifdef REGISTER_PREFIX
1195 if (*start
!= REGISTER_PREFIX
)
1199 #ifdef OPTIONAL_REGISTER_PREFIX
1200 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1205 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1210 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1212 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1222 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1223 enum arm_reg_type type
)
1225 /* Alternative syntaxes are accepted for a few register classes. */
1232 /* Generic coprocessor register names are allowed for these. */
1233 if (reg
&& reg
->type
== REG_TYPE_CN
)
1238 /* For backward compatibility, a bare number is valid here. */
1240 unsigned long processor
= strtoul (start
, ccp
, 10);
1241 if (*ccp
!= start
&& processor
<= 15)
1245 case REG_TYPE_MMXWC
:
1246 /* WC includes WCG. ??? I'm not sure this is true for all
1247 instructions that take WC registers. */
1248 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1259 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1260 return value is the register number or FAIL. */
1263 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1266 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1269 /* Do not allow a scalar (reg+index) to parse as a register. */
1270 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1273 if (reg
&& reg
->type
== type
)
1276 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1283 /* Parse a Neon type specifier. *STR should point at the leading '.'
1284 character. Does no verification at this stage that the type fits the opcode
1291 Can all be legally parsed by this function.
1293 Fills in neon_type struct pointer with parsed information, and updates STR
1294 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1295 type, FAIL if not. */
1298 parse_neon_type (struct neon_type
*type
, char **str
)
1305 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1307 enum neon_el_type thistype
= NT_untyped
;
1308 unsigned thissize
= -1u;
1315 /* Just a size without an explicit type. */
1319 switch (TOLOWER (*ptr
))
1321 case 'i': thistype
= NT_integer
; break;
1322 case 'f': thistype
= NT_float
; break;
1323 case 'p': thistype
= NT_poly
; break;
1324 case 's': thistype
= NT_signed
; break;
1325 case 'u': thistype
= NT_unsigned
; break;
1327 thistype
= NT_float
;
1332 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1338 /* .f is an abbreviation for .f32. */
1339 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1344 thissize
= strtoul (ptr
, &ptr
, 10);
1346 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1349 as_bad (_("bad size %d in type specifier"), thissize
);
1357 type
->el
[type
->elems
].type
= thistype
;
1358 type
->el
[type
->elems
].size
= thissize
;
1363 /* Empty/missing type is not a successful parse. */
1364 if (type
->elems
== 0)
1372 /* Errors may be set multiple times during parsing or bit encoding
1373 (particularly in the Neon bits), but usually the earliest error which is set
1374 will be the most meaningful. Avoid overwriting it with later (cascading)
1375 errors by calling this function. */
1378 first_error (const char *err
)
1384 /* Parse a single type, e.g. ".s32", leading period included. */
1386 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1389 struct neon_type optype
;
1393 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1395 if (optype
.elems
== 1)
1396 *vectype
= optype
.el
[0];
1399 first_error (_("only one type should be specified for operand"));
1405 first_error (_("vector type expected"));
1417 /* Special meanings for indices (which have a range of 0-7), which will fit into
1420 #define NEON_ALL_LANES 15
1421 #define NEON_INTERLEAVE_LANES 14
1423 /* Parse either a register or a scalar, with an optional type. Return the
1424 register number, and optionally fill in the actual type of the register
1425 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1426 type/index information in *TYPEINFO. */
1429 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1430 enum arm_reg_type
*rtype
,
1431 struct neon_typed_alias
*typeinfo
)
1434 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1435 struct neon_typed_alias atype
;
1436 struct neon_type_el parsetype
;
1440 atype
.eltype
.type
= NT_invtype
;
1441 atype
.eltype
.size
= -1;
1443 /* Try alternate syntax for some types of register. Note these are mutually
1444 exclusive with the Neon syntax extensions. */
1447 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1455 /* Undo polymorphism when a set of register types may be accepted. */
1456 if ((type
== REG_TYPE_NDQ
1457 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1458 || (type
== REG_TYPE_VFSD
1459 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1460 || (type
== REG_TYPE_NSDQ
1461 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1462 || reg
->type
== REG_TYPE_NQ
))
1463 || (type
== REG_TYPE_MMXWC
1464 && (reg
->type
== REG_TYPE_MMXWCG
)))
1465 type
= (enum arm_reg_type
) reg
->type
;
1467 if (type
!= reg
->type
)
1473 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1475 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1477 first_error (_("can't redefine type for operand"));
1480 atype
.defined
|= NTA_HASTYPE
;
1481 atype
.eltype
= parsetype
;
1484 if (skip_past_char (&str
, '[') == SUCCESS
)
1486 if (type
!= REG_TYPE_VFD
)
1488 first_error (_("only D registers may be indexed"));
1492 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1494 first_error (_("can't change index for operand"));
1498 atype
.defined
|= NTA_HASINDEX
;
1500 if (skip_past_char (&str
, ']') == SUCCESS
)
1501 atype
.index
= NEON_ALL_LANES
;
1506 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1508 if (exp
.X_op
!= O_constant
)
1510 first_error (_("constant expression required"));
1514 if (skip_past_char (&str
, ']') == FAIL
)
1517 atype
.index
= exp
.X_add_number
;
1532 /* Like arm_reg_parse, but allow allow the following extra features:
1533 - If RTYPE is non-zero, return the (possibly restricted) type of the
1534 register (e.g. Neon double or quad reg when either has been requested).
1535 - If this is a Neon vector type with additional type information, fill
1536 in the struct pointed to by VECTYPE (if non-NULL).
1537 This function will fault on encountering a scalar. */
1540 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1541 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1543 struct neon_typed_alias atype
;
1545 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1550 /* Do not allow regname(... to parse as a register. */
1554 /* Do not allow a scalar (reg+index) to parse as a register. */
1555 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1557 first_error (_("register operand expected, but got scalar"));
1562 *vectype
= atype
.eltype
;
1569 #define NEON_SCALAR_REG(X) ((X) >> 4)
1570 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1572 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1573 have enough information to be able to do a good job bounds-checking. So, we
1574 just do easy checks here, and do further checks later. */
1577 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1581 struct neon_typed_alias atype
;
1583 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1585 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1588 if (atype
.index
== NEON_ALL_LANES
)
1590 first_error (_("scalar must have an index"));
1593 else if (atype
.index
>= 64 / elsize
)
1595 first_error (_("scalar index out of range"));
1600 *type
= atype
.eltype
;
1604 return reg
* 16 + atype
.index
;
1607 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1610 parse_reg_list (char ** strp
)
1612 char * str
= * strp
;
1616 /* We come back here if we get ranges concatenated by '+' or '|'. */
1619 skip_whitespace (str
);
1633 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1635 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1645 first_error (_("bad range in register list"));
1649 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1651 if (range
& (1 << i
))
1653 (_("Warning: duplicated register (r%d) in register list"),
1661 if (range
& (1 << reg
))
1662 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1664 else if (reg
<= cur_reg
)
1665 as_tsktsk (_("Warning: register range not in ascending order"));
1670 while (skip_past_comma (&str
) != FAIL
1671 || (in_range
= 1, *str
++ == '-'));
1674 if (skip_past_char (&str
, '}') == FAIL
)
1676 first_error (_("missing `}'"));
1684 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1687 if (exp
.X_op
== O_constant
)
1689 if (exp
.X_add_number
1690 != (exp
.X_add_number
& 0x0000ffff))
1692 inst
.error
= _("invalid register mask");
1696 if ((range
& exp
.X_add_number
) != 0)
1698 int regno
= range
& exp
.X_add_number
;
1701 regno
= (1 << regno
) - 1;
1703 (_("Warning: duplicated register (r%d) in register list"),
1707 range
|= exp
.X_add_number
;
1711 if (inst
.reloc
.type
!= 0)
1713 inst
.error
= _("expression too complex");
1717 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1718 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1719 inst
.reloc
.pc_rel
= 0;
1723 if (*str
== '|' || *str
== '+')
1729 while (another_range
);
1735 /* Types of registers in a list. */
1744 /* Parse a VFP register list. If the string is invalid return FAIL.
1745 Otherwise return the number of registers, and set PBASE to the first
1746 register. Parses registers of type ETYPE.
1747 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1748 - Q registers can be used to specify pairs of D registers
1749 - { } can be omitted from around a singleton register list
1750 FIXME: This is not implemented, as it would require backtracking in
1753 This could be done (the meaning isn't really ambiguous), but doesn't
1754 fit in well with the current parsing framework.
1755 - 32 D registers may be used (also true for VFPv3).
1756 FIXME: Types are ignored in these register lists, which is probably a
1760 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1765 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1769 unsigned long mask
= 0;
1772 if (skip_past_char (&str
, '{') == FAIL
)
1774 inst
.error
= _("expecting {");
1781 regtype
= REG_TYPE_VFS
;
1786 regtype
= REG_TYPE_VFD
;
1789 case REGLIST_NEON_D
:
1790 regtype
= REG_TYPE_NDQ
;
1794 if (etype
!= REGLIST_VFP_S
)
1796 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1797 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1801 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1804 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1811 base_reg
= max_regs
;
1815 int setmask
= 1, addregs
= 1;
1817 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1819 if (new_base
== FAIL
)
1821 first_error (_(reg_expected_msgs
[regtype
]));
1825 if (new_base
>= max_regs
)
1827 first_error (_("register out of range in list"));
1831 /* Note: a value of 2 * n is returned for the register Q<n>. */
1832 if (regtype
== REG_TYPE_NQ
)
1838 if (new_base
< base_reg
)
1839 base_reg
= new_base
;
1841 if (mask
& (setmask
<< new_base
))
1843 first_error (_("invalid register list"));
1847 if ((mask
>> new_base
) != 0 && ! warned
)
1849 as_tsktsk (_("register list not in ascending order"));
1853 mask
|= setmask
<< new_base
;
1856 if (*str
== '-') /* We have the start of a range expression */
1862 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1865 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1869 if (high_range
>= max_regs
)
1871 first_error (_("register out of range in list"));
1875 if (regtype
== REG_TYPE_NQ
)
1876 high_range
= high_range
+ 1;
1878 if (high_range
<= new_base
)
1880 inst
.error
= _("register range not in ascending order");
1884 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1886 if (mask
& (setmask
<< new_base
))
1888 inst
.error
= _("invalid register list");
1892 mask
|= setmask
<< new_base
;
1897 while (skip_past_comma (&str
) != FAIL
);
1901 /* Sanity check -- should have raised a parse error above. */
1902 if (count
== 0 || count
> max_regs
)
1907 /* Final test -- the registers must be consecutive. */
1909 for (i
= 0; i
< count
; i
++)
1911 if ((mask
& (1u << i
)) == 0)
1913 inst
.error
= _("non-contiguous register range");
1923 /* True if two alias types are the same. */
1926 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1934 if (a
->defined
!= b
->defined
)
1937 if ((a
->defined
& NTA_HASTYPE
) != 0
1938 && (a
->eltype
.type
!= b
->eltype
.type
1939 || a
->eltype
.size
!= b
->eltype
.size
))
1942 if ((a
->defined
& NTA_HASINDEX
) != 0
1943 && (a
->index
!= b
->index
))
1949 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1950 The base register is put in *PBASE.
1951 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1953 The register stride (minus one) is put in bit 4 of the return value.
1954 Bits [6:5] encode the list length (minus one).
1955 The type of the list elements is put in *ELTYPE, if non-NULL. */
1957 #define NEON_LANE(X) ((X) & 0xf)
1958 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1959 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1962 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1963 struct neon_type_el
*eltype
)
1970 int leading_brace
= 0;
1971 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1972 const char *const incr_error
= _("register stride must be 1 or 2");
1973 const char *const type_error
= _("mismatched element/structure types in list");
1974 struct neon_typed_alias firsttype
;
1976 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1981 struct neon_typed_alias atype
;
1982 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1986 first_error (_(reg_expected_msgs
[rtype
]));
1993 if (rtype
== REG_TYPE_NQ
)
1999 else if (reg_incr
== -1)
2001 reg_incr
= getreg
- base_reg
;
2002 if (reg_incr
< 1 || reg_incr
> 2)
2004 first_error (_(incr_error
));
2008 else if (getreg
!= base_reg
+ reg_incr
* count
)
2010 first_error (_(incr_error
));
2014 if (! neon_alias_types_same (&atype
, &firsttype
))
2016 first_error (_(type_error
));
2020 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2024 struct neon_typed_alias htype
;
2025 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2027 lane
= NEON_INTERLEAVE_LANES
;
2028 else if (lane
!= NEON_INTERLEAVE_LANES
)
2030 first_error (_(type_error
));
2035 else if (reg_incr
!= 1)
2037 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2041 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2044 first_error (_(reg_expected_msgs
[rtype
]));
2047 if (! neon_alias_types_same (&htype
, &firsttype
))
2049 first_error (_(type_error
));
2052 count
+= hireg
+ dregs
- getreg
;
2056 /* If we're using Q registers, we can't use [] or [n] syntax. */
2057 if (rtype
== REG_TYPE_NQ
)
2063 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2067 else if (lane
!= atype
.index
)
2069 first_error (_(type_error
));
2073 else if (lane
== -1)
2074 lane
= NEON_INTERLEAVE_LANES
;
2075 else if (lane
!= NEON_INTERLEAVE_LANES
)
2077 first_error (_(type_error
));
2082 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2084 /* No lane set by [x]. We must be interleaving structures. */
2086 lane
= NEON_INTERLEAVE_LANES
;
2089 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2090 || (count
> 1 && reg_incr
== -1))
2092 first_error (_("error parsing element/structure list"));
2096 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2098 first_error (_("expected }"));
2106 *eltype
= firsttype
.eltype
;
2111 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2114 /* Parse an explicit relocation suffix on an expression. This is
2115 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2116 arm_reloc_hsh contains no entries, so this function can only
2117 succeed if there is no () after the word. Returns -1 on error,
2118 BFD_RELOC_UNUSED if there wasn't any suffix. */
2121 parse_reloc (char **str
)
2123 struct reloc_entry
*r
;
2127 return BFD_RELOC_UNUSED
;
2132 while (*q
&& *q
!= ')' && *q
!= ',')
2137 if ((r
= (struct reloc_entry
*)
2138 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2145 /* Directives: register aliases. */
2147 static struct reg_entry
*
2148 insert_reg_alias (char *str
, unsigned number
, int type
)
2150 struct reg_entry
*new_reg
;
2153 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2155 if (new_reg
->builtin
)
2156 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2158 /* Only warn about a redefinition if it's not defined as the
2160 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2161 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2166 name
= xstrdup (str
);
2167 new_reg
= (struct reg_entry
*) xmalloc (sizeof (struct reg_entry
));
2169 new_reg
->name
= name
;
2170 new_reg
->number
= number
;
2171 new_reg
->type
= type
;
2172 new_reg
->builtin
= FALSE
;
2173 new_reg
->neon
= NULL
;
2175 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2182 insert_neon_reg_alias (char *str
, int number
, int type
,
2183 struct neon_typed_alias
*atype
)
2185 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2189 first_error (_("attempt to redefine typed alias"));
2195 reg
->neon
= (struct neon_typed_alias
*)
2196 xmalloc (sizeof (struct neon_typed_alias
));
2197 *reg
->neon
= *atype
;
2201 /* Look for the .req directive. This is of the form:
2203 new_register_name .req existing_register_name
2205 If we find one, or if it looks sufficiently like one that we want to
2206 handle any error here, return TRUE. Otherwise return FALSE. */
2209 create_register_alias (char * newname
, char *p
)
2211 struct reg_entry
*old
;
2212 char *oldname
, *nbuf
;
2215 /* The input scrubber ensures that whitespace after the mnemonic is
2216 collapsed to single spaces. */
2218 if (strncmp (oldname
, " .req ", 6) != 0)
2222 if (*oldname
== '\0')
2225 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2228 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2232 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2233 the desired alias name, and p points to its end. If not, then
2234 the desired alias name is in the global original_case_string. */
2235 #ifdef TC_CASE_SENSITIVE
2238 newname
= original_case_string
;
2239 nlen
= strlen (newname
);
2242 nbuf
= (char *) alloca (nlen
+ 1);
2243 memcpy (nbuf
, newname
, nlen
);
2246 /* Create aliases under the new name as stated; an all-lowercase
2247 version of the new name; and an all-uppercase version of the new
2249 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2251 for (p
= nbuf
; *p
; p
++)
2254 if (strncmp (nbuf
, newname
, nlen
))
2256 /* If this attempt to create an additional alias fails, do not bother
2257 trying to create the all-lower case alias. We will fail and issue
2258 a second, duplicate error message. This situation arises when the
2259 programmer does something like:
2262 The second .req creates the "Foo" alias but then fails to create
2263 the artificial FOO alias because it has already been created by the
2265 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2269 for (p
= nbuf
; *p
; p
++)
2272 if (strncmp (nbuf
, newname
, nlen
))
2273 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2279 /* Create a Neon typed/indexed register alias using directives, e.g.:
2284 These typed registers can be used instead of the types specified after the
2285 Neon mnemonic, so long as all operands given have types. Types can also be
2286 specified directly, e.g.:
2287 vadd d0.s32, d1.s32, d2.s32 */
2290 create_neon_reg_alias (char *newname
, char *p
)
2292 enum arm_reg_type basetype
;
2293 struct reg_entry
*basereg
;
2294 struct reg_entry mybasereg
;
2295 struct neon_type ntype
;
2296 struct neon_typed_alias typeinfo
;
2297 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2300 typeinfo
.defined
= 0;
2301 typeinfo
.eltype
.type
= NT_invtype
;
2302 typeinfo
.eltype
.size
= -1;
2303 typeinfo
.index
= -1;
2307 if (strncmp (p
, " .dn ", 5) == 0)
2308 basetype
= REG_TYPE_VFD
;
2309 else if (strncmp (p
, " .qn ", 5) == 0)
2310 basetype
= REG_TYPE_NQ
;
2319 basereg
= arm_reg_parse_multi (&p
);
2321 if (basereg
&& basereg
->type
!= basetype
)
2323 as_bad (_("bad type for register"));
2327 if (basereg
== NULL
)
2330 /* Try parsing as an integer. */
2331 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2332 if (exp
.X_op
!= O_constant
)
2334 as_bad (_("expression must be constant"));
2337 basereg
= &mybasereg
;
2338 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2344 typeinfo
= *basereg
->neon
;
2346 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2348 /* We got a type. */
2349 if (typeinfo
.defined
& NTA_HASTYPE
)
2351 as_bad (_("can't redefine the type of a register alias"));
2355 typeinfo
.defined
|= NTA_HASTYPE
;
2356 if (ntype
.elems
!= 1)
2358 as_bad (_("you must specify a single type only"));
2361 typeinfo
.eltype
= ntype
.el
[0];
2364 if (skip_past_char (&p
, '[') == SUCCESS
)
2367 /* We got a scalar index. */
2369 if (typeinfo
.defined
& NTA_HASINDEX
)
2371 as_bad (_("can't redefine the index of a scalar alias"));
2375 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2377 if (exp
.X_op
!= O_constant
)
2379 as_bad (_("scalar index must be constant"));
2383 typeinfo
.defined
|= NTA_HASINDEX
;
2384 typeinfo
.index
= exp
.X_add_number
;
2386 if (skip_past_char (&p
, ']') == FAIL
)
2388 as_bad (_("expecting ]"));
2393 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2394 the desired alias name, and p points to its end. If not, then
2395 the desired alias name is in the global original_case_string. */
2396 #ifdef TC_CASE_SENSITIVE
2397 namelen
= nameend
- newname
;
2399 newname
= original_case_string
;
2400 namelen
= strlen (newname
);
2403 namebuf
= (char *) alloca (namelen
+ 1);
2404 strncpy (namebuf
, newname
, namelen
);
2405 namebuf
[namelen
] = '\0';
2407 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2408 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2410 /* Insert name in all uppercase. */
2411 for (p
= namebuf
; *p
; p
++)
2414 if (strncmp (namebuf
, newname
, namelen
))
2415 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2416 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2418 /* Insert name in all lowercase. */
2419 for (p
= namebuf
; *p
; p
++)
2422 if (strncmp (namebuf
, newname
, namelen
))
2423 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2424 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2429 /* Should never be called, as .req goes between the alias and the
2430 register name, not at the beginning of the line. */
2433 s_req (int a ATTRIBUTE_UNUSED
)
2435 as_bad (_("invalid syntax for .req directive"));
2439 s_dn (int a ATTRIBUTE_UNUSED
)
2441 as_bad (_("invalid syntax for .dn directive"));
2445 s_qn (int a ATTRIBUTE_UNUSED
)
2447 as_bad (_("invalid syntax for .qn directive"));
2450 /* The .unreq directive deletes an alias which was previously defined
2451 by .req. For example:
2457 s_unreq (int a ATTRIBUTE_UNUSED
)
2462 name
= input_line_pointer
;
2464 while (*input_line_pointer
!= 0
2465 && *input_line_pointer
!= ' '
2466 && *input_line_pointer
!= '\n')
2467 ++input_line_pointer
;
2469 saved_char
= *input_line_pointer
;
2470 *input_line_pointer
= 0;
2473 as_bad (_("invalid syntax for .unreq directive"));
2476 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2480 as_bad (_("unknown register alias '%s'"), name
);
2481 else if (reg
->builtin
)
2482 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2489 hash_delete (arm_reg_hsh
, name
, FALSE
);
2490 free ((char *) reg
->name
);
2495 /* Also locate the all upper case and all lower case versions.
2496 Do not complain if we cannot find one or the other as it
2497 was probably deleted above. */
2499 nbuf
= strdup (name
);
2500 for (p
= nbuf
; *p
; p
++)
2502 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2505 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2506 free ((char *) reg
->name
);
2512 for (p
= nbuf
; *p
; p
++)
2514 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2517 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2518 free ((char *) reg
->name
);
2528 *input_line_pointer
= saved_char
;
2529 demand_empty_rest_of_line ();
2532 /* Directives: Instruction set selection. */
2535 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2536 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2537 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2538 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2540 /* Create a new mapping symbol for the transition to STATE. */
2543 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2546 const char * symname
;
2553 type
= BSF_NO_FLAGS
;
2557 type
= BSF_NO_FLAGS
;
2561 type
= BSF_NO_FLAGS
;
2567 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2568 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2573 THUMB_SET_FUNC (symbolP
, 0);
2574 ARM_SET_THUMB (symbolP
, 0);
2575 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2579 THUMB_SET_FUNC (symbolP
, 1);
2580 ARM_SET_THUMB (symbolP
, 1);
2581 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2589 /* Save the mapping symbols for future reference. Also check that
2590 we do not place two mapping symbols at the same offset within a
2591 frag. We'll handle overlap between frags in
2592 check_mapping_symbols.
2594 If .fill or other data filling directive generates zero sized data,
2595 the mapping symbol for the following code will have the same value
2596 as the one generated for the data filling directive. In this case,
2597 we replace the old symbol with the new one at the same address. */
2600 if (frag
->tc_frag_data
.first_map
!= NULL
)
2602 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2603 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2605 frag
->tc_frag_data
.first_map
= symbolP
;
2607 if (frag
->tc_frag_data
.last_map
!= NULL
)
2609 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2610 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2611 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2613 frag
->tc_frag_data
.last_map
= symbolP
;
2616 /* We must sometimes convert a region marked as code to data during
2617 code alignment, if an odd number of bytes have to be padded. The
2618 code mapping symbol is pushed to an aligned address. */
2621 insert_data_mapping_symbol (enum mstate state
,
2622 valueT value
, fragS
*frag
, offsetT bytes
)
2624 /* If there was already a mapping symbol, remove it. */
2625 if (frag
->tc_frag_data
.last_map
!= NULL
2626 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2628 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2632 know (frag
->tc_frag_data
.first_map
== symp
);
2633 frag
->tc_frag_data
.first_map
= NULL
;
2635 frag
->tc_frag_data
.last_map
= NULL
;
2636 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2639 make_mapping_symbol (MAP_DATA
, value
, frag
);
2640 make_mapping_symbol (state
, value
+ bytes
, frag
);
2643 static void mapping_state_2 (enum mstate state
, int max_chars
);
2645 /* Set the mapping state to STATE. Only call this when about to
2646 emit some STATE bytes to the file. */
2648 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2650 mapping_state (enum mstate state
)
2652 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2654 if (mapstate
== state
)
2655 /* The mapping symbol has already been emitted.
2656 There is nothing else to do. */
2659 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2661 All ARM instructions require 4-byte alignment.
2662 (Almost) all Thumb instructions require 2-byte alignment.
2664 When emitting instructions into any section, mark the section
2667 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2668 but themselves require 2-byte alignment; this applies to some
2669 PC- relative forms. However, these cases will invovle implicit
2670 literal pool generation or an explicit .align >=2, both of
2671 which will cause the section to me marked with sufficient
2672 alignment. Thus, we don't handle those cases here. */
2673 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2675 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2676 /* This case will be evaluated later. */
2679 mapping_state_2 (state
, 0);
2682 /* Same as mapping_state, but MAX_CHARS bytes have already been
2683 allocated. Put the mapping symbol that far back. */
2686 mapping_state_2 (enum mstate state
, int max_chars
)
2688 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2690 if (!SEG_NORMAL (now_seg
))
2693 if (mapstate
== state
)
2694 /* The mapping symbol has already been emitted.
2695 There is nothing else to do. */
2698 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2699 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2701 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2702 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2705 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2708 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2709 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2713 #define mapping_state(x) ((void)0)
2714 #define mapping_state_2(x, y) ((void)0)
2717 /* Find the real, Thumb encoded start of a Thumb function. */
2721 find_real_start (symbolS
* symbolP
)
2724 const char * name
= S_GET_NAME (symbolP
);
2725 symbolS
* new_target
;
2727 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2728 #define STUB_NAME ".real_start_of"
2733 /* The compiler may generate BL instructions to local labels because
2734 it needs to perform a branch to a far away location. These labels
2735 do not have a corresponding ".real_start_of" label. We check
2736 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2737 the ".real_start_of" convention for nonlocal branches. */
2738 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2741 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2742 new_target
= symbol_find (real_start
);
2744 if (new_target
== NULL
)
2746 as_warn (_("Failed to find real start of function: %s\n"), name
);
2747 new_target
= symbolP
;
2755 opcode_select (int width
)
2762 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2763 as_bad (_("selected processor does not support THUMB opcodes"));
2766 /* No need to force the alignment, since we will have been
2767 coming from ARM mode, which is word-aligned. */
2768 record_alignment (now_seg
, 1);
2775 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2776 as_bad (_("selected processor does not support ARM opcodes"));
2781 frag_align (2, 0, 0);
2783 record_alignment (now_seg
, 1);
2788 as_bad (_("invalid instruction size selected (%d)"), width
);
2793 s_arm (int ignore ATTRIBUTE_UNUSED
)
2796 demand_empty_rest_of_line ();
2800 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2803 demand_empty_rest_of_line ();
2807 s_code (int unused ATTRIBUTE_UNUSED
)
2811 temp
= get_absolute_expression ();
2816 opcode_select (temp
);
2820 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2825 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2827 /* If we are not already in thumb mode go into it, EVEN if
2828 the target processor does not support thumb instructions.
2829 This is used by gcc/config/arm/lib1funcs.asm for example
2830 to compile interworking support functions even if the
2831 target processor should not support interworking. */
2835 record_alignment (now_seg
, 1);
2838 demand_empty_rest_of_line ();
2842 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2846 /* The following label is the name/address of the start of a Thumb function.
2847 We need to know this for the interworking support. */
2848 label_is_thumb_function_name
= TRUE
;
2851 /* Perform a .set directive, but also mark the alias as
2852 being a thumb function. */
2855 s_thumb_set (int equiv
)
2857 /* XXX the following is a duplicate of the code for s_set() in read.c
2858 We cannot just call that code as we need to get at the symbol that
2865 /* Especial apologies for the random logic:
2866 This just grew, and could be parsed much more simply!
2868 name
= input_line_pointer
;
2869 delim
= get_symbol_end ();
2870 end_name
= input_line_pointer
;
2873 if (*input_line_pointer
!= ',')
2876 as_bad (_("expected comma after name \"%s\""), name
);
2878 ignore_rest_of_line ();
2882 input_line_pointer
++;
2885 if (name
[0] == '.' && name
[1] == '\0')
2887 /* XXX - this should not happen to .thumb_set. */
2891 if ((symbolP
= symbol_find (name
)) == NULL
2892 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2895 /* When doing symbol listings, play games with dummy fragments living
2896 outside the normal fragment chain to record the file and line info
2898 if (listing
& LISTING_SYMBOLS
)
2900 extern struct list_info_struct
* listing_tail
;
2901 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2903 memset (dummy_frag
, 0, sizeof (fragS
));
2904 dummy_frag
->fr_type
= rs_fill
;
2905 dummy_frag
->line
= listing_tail
;
2906 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2907 dummy_frag
->fr_symbol
= symbolP
;
2911 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2914 /* "set" symbols are local unless otherwise specified. */
2915 SF_SET_LOCAL (symbolP
);
2916 #endif /* OBJ_COFF */
2917 } /* Make a new symbol. */
2919 symbol_table_insert (symbolP
);
2924 && S_IS_DEFINED (symbolP
)
2925 && S_GET_SEGMENT (symbolP
) != reg_section
)
2926 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2928 pseudo_set (symbolP
);
2930 demand_empty_rest_of_line ();
2932 /* XXX Now we come to the Thumb specific bit of code. */
2934 THUMB_SET_FUNC (symbolP
, 1);
2935 ARM_SET_THUMB (symbolP
, 1);
2936 #if defined OBJ_ELF || defined OBJ_COFF
2937 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2941 /* Directives: Mode selection. */
2943 /* .syntax [unified|divided] - choose the new unified syntax
2944 (same for Arm and Thumb encoding, modulo slight differences in what
2945 can be represented) or the old divergent syntax for each mode. */
2947 s_syntax (int unused ATTRIBUTE_UNUSED
)
2951 name
= input_line_pointer
;
2952 delim
= get_symbol_end ();
2954 if (!strcasecmp (name
, "unified"))
2955 unified_syntax
= TRUE
;
2956 else if (!strcasecmp (name
, "divided"))
2957 unified_syntax
= FALSE
;
2960 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2963 *input_line_pointer
= delim
;
2964 demand_empty_rest_of_line ();
2967 /* Directives: sectioning and alignment. */
2969 /* Same as s_align_ptwo but align 0 => align 2. */
2972 s_align (int unused ATTRIBUTE_UNUSED
)
2977 long max_alignment
= 15;
2979 temp
= get_absolute_expression ();
2980 if (temp
> max_alignment
)
2981 as_bad (_("alignment too large: %d assumed"), temp
= max_alignment
);
2984 as_bad (_("alignment negative. 0 assumed."));
2988 if (*input_line_pointer
== ',')
2990 input_line_pointer
++;
2991 temp_fill
= get_absolute_expression ();
3003 /* Only make a frag if we HAVE to. */
3004 if (temp
&& !need_pass_2
)
3006 if (!fill_p
&& subseg_text_p (now_seg
))
3007 frag_align_code (temp
, 0);
3009 frag_align (temp
, (int) temp_fill
, 0);
3011 demand_empty_rest_of_line ();
3013 record_alignment (now_seg
, temp
);
3017 s_bss (int ignore ATTRIBUTE_UNUSED
)
3019 /* We don't support putting frags in the BSS segment, we fake it by
3020 marking in_bss, then looking at s_skip for clues. */
3021 subseg_set (bss_section
, 0);
3022 demand_empty_rest_of_line ();
3024 #ifdef md_elf_section_change_hook
3025 md_elf_section_change_hook ();
3030 s_even (int ignore ATTRIBUTE_UNUSED
)
3032 /* Never make frag if expect extra pass. */
3034 frag_align (1, 0, 0);
3036 record_alignment (now_seg
, 1);
3038 demand_empty_rest_of_line ();
3041 /* Directives: CodeComposer Studio. */
3043 /* .ref (for CodeComposer Studio syntax only). */
3045 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3047 if (codecomposer_syntax
)
3048 ignore_rest_of_line ();
3050 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3053 /* If name is not NULL, then it is used for marking the beginning of a
3054 function, wherease if it is NULL then it means the function end. */
3056 asmfunc_debug (const char * name
)
3058 static const char * last_name
= NULL
;
3062 gas_assert (last_name
== NULL
);
3065 if (debug_type
== DEBUG_STABS
)
3066 stabs_generate_asm_func (name
, name
);
3070 gas_assert (last_name
!= NULL
);
3072 if (debug_type
== DEBUG_STABS
)
3073 stabs_generate_asm_endfunc (last_name
, last_name
);
3080 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3082 if (codecomposer_syntax
)
3084 switch (asmfunc_state
)
3086 case OUTSIDE_ASMFUNC
:
3087 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3090 case WAITING_ASMFUNC_NAME
:
3091 as_bad (_(".asmfunc repeated."));
3094 case WAITING_ENDASMFUNC
:
3095 as_bad (_(".asmfunc without function."));
3098 demand_empty_rest_of_line ();
3101 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3105 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3107 if (codecomposer_syntax
)
3109 switch (asmfunc_state
)
3111 case OUTSIDE_ASMFUNC
:
3112 as_bad (_(".endasmfunc without a .asmfunc."));
3115 case WAITING_ASMFUNC_NAME
:
3116 as_bad (_(".endasmfunc without function."));
3119 case WAITING_ENDASMFUNC
:
3120 asmfunc_state
= OUTSIDE_ASMFUNC
;
3121 asmfunc_debug (NULL
);
3124 demand_empty_rest_of_line ();
3127 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3131 s_ccs_def (int name
)
3133 if (codecomposer_syntax
)
3136 as_bad (_(".def pseudo-op only available with -mccs flag."));
3139 /* Directives: Literal pools. */
3141 static literal_pool
*
3142 find_literal_pool (void)
3144 literal_pool
* pool
;
3146 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3148 if (pool
->section
== now_seg
3149 && pool
->sub_section
== now_subseg
)
3156 static literal_pool
*
3157 find_or_make_literal_pool (void)
3159 /* Next literal pool ID number. */
3160 static unsigned int latest_pool_num
= 1;
3161 literal_pool
* pool
;
3163 pool
= find_literal_pool ();
3167 /* Create a new pool. */
3168 pool
= (literal_pool
*) xmalloc (sizeof (* pool
));
3172 pool
->next_free_entry
= 0;
3173 pool
->section
= now_seg
;
3174 pool
->sub_section
= now_subseg
;
3175 pool
->next
= list_of_pools
;
3176 pool
->symbol
= NULL
;
3177 pool
->alignment
= 2;
3179 /* Add it to the list. */
3180 list_of_pools
= pool
;
3183 /* New pools, and emptied pools, will have a NULL symbol. */
3184 if (pool
->symbol
== NULL
)
3186 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3187 (valueT
) 0, &zero_address_frag
);
3188 pool
->id
= latest_pool_num
++;
3195 /* Add the literal in the global 'inst'
3196 structure to the relevant literal pool. */
3199 add_to_lit_pool (unsigned int nbytes
)
3201 #define PADDING_SLOT 0x1
3202 #define LIT_ENTRY_SIZE_MASK 0xFF
3203 literal_pool
* pool
;
3204 unsigned int entry
, pool_size
= 0;
3205 bfd_boolean padding_slot_p
= FALSE
;
3211 imm1
= inst
.operands
[1].imm
;
3212 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3213 : inst
.reloc
.exp
.X_unsigned
? 0
3214 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3215 if (target_big_endian
)
3218 imm2
= inst
.operands
[1].imm
;
3222 pool
= find_or_make_literal_pool ();
3224 /* Check if this literal value is already in the pool. */
3225 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3229 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3230 && (inst
.reloc
.exp
.X_op
== O_constant
)
3231 && (pool
->literals
[entry
].X_add_number
3232 == inst
.reloc
.exp
.X_add_number
)
3233 && (pool
->literals
[entry
].X_md
== nbytes
)
3234 && (pool
->literals
[entry
].X_unsigned
3235 == inst
.reloc
.exp
.X_unsigned
))
3238 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3239 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3240 && (pool
->literals
[entry
].X_add_number
3241 == inst
.reloc
.exp
.X_add_number
)
3242 && (pool
->literals
[entry
].X_add_symbol
3243 == inst
.reloc
.exp
.X_add_symbol
)
3244 && (pool
->literals
[entry
].X_op_symbol
3245 == inst
.reloc
.exp
.X_op_symbol
)
3246 && (pool
->literals
[entry
].X_md
== nbytes
))
3249 else if ((nbytes
== 8)
3250 && !(pool_size
& 0x7)
3251 && ((entry
+ 1) != pool
->next_free_entry
)
3252 && (pool
->literals
[entry
].X_op
== O_constant
)
3253 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3254 && (pool
->literals
[entry
].X_unsigned
3255 == inst
.reloc
.exp
.X_unsigned
)
3256 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3257 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3258 && (pool
->literals
[entry
+ 1].X_unsigned
3259 == inst
.reloc
.exp
.X_unsigned
))
3262 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3263 if (padding_slot_p
&& (nbytes
== 4))
3269 /* Do we need to create a new entry? */
3270 if (entry
== pool
->next_free_entry
)
3272 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3274 inst
.error
= _("literal pool overflow");
3280 /* For 8-byte entries, we align to an 8-byte boundary,
3281 and split it into two 4-byte entries, because on 32-bit
3282 host, 8-byte constants are treated as big num, thus
3283 saved in "generic_bignum" which will be overwritten
3284 by later assignments.
3286 We also need to make sure there is enough space for
3289 We also check to make sure the literal operand is a
3291 if (!(inst
.reloc
.exp
.X_op
== O_constant
3292 || inst
.reloc
.exp
.X_op
== O_big
))
3294 inst
.error
= _("invalid type for literal pool");
3297 else if (pool_size
& 0x7)
3299 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3301 inst
.error
= _("literal pool overflow");
3305 pool
->literals
[entry
] = inst
.reloc
.exp
;
3306 pool
->literals
[entry
].X_add_number
= 0;
3307 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3308 pool
->next_free_entry
+= 1;
3311 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3313 inst
.error
= _("literal pool overflow");
3317 pool
->literals
[entry
] = inst
.reloc
.exp
;
3318 pool
->literals
[entry
].X_op
= O_constant
;
3319 pool
->literals
[entry
].X_add_number
= imm1
;
3320 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3321 pool
->literals
[entry
++].X_md
= 4;
3322 pool
->literals
[entry
] = inst
.reloc
.exp
;
3323 pool
->literals
[entry
].X_op
= O_constant
;
3324 pool
->literals
[entry
].X_add_number
= imm2
;
3325 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3326 pool
->literals
[entry
].X_md
= 4;
3327 pool
->alignment
= 3;
3328 pool
->next_free_entry
+= 1;
3332 pool
->literals
[entry
] = inst
.reloc
.exp
;
3333 pool
->literals
[entry
].X_md
= 4;
3337 /* PR ld/12974: Record the location of the first source line to reference
3338 this entry in the literal pool. If it turns out during linking that the
3339 symbol does not exist we will be able to give an accurate line number for
3340 the (first use of the) missing reference. */
3341 if (debug_type
== DEBUG_DWARF2
)
3342 dwarf2_where (pool
->locs
+ entry
);
3344 pool
->next_free_entry
+= 1;
3346 else if (padding_slot_p
)
3348 pool
->literals
[entry
] = inst
.reloc
.exp
;
3349 pool
->literals
[entry
].X_md
= nbytes
;
3352 inst
.reloc
.exp
.X_op
= O_symbol
;
3353 inst
.reloc
.exp
.X_add_number
= pool_size
;
3354 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3360 tc_start_label_without_colon (char unused1 ATTRIBUTE_UNUSED
, const char * rest
)
3362 bfd_boolean ret
= TRUE
;
3364 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3366 const char *label
= rest
;
3368 while (!is_end_of_line
[(int) label
[-1]])
3373 as_bad (_("Invalid label '%s'"), label
);
3377 asmfunc_debug (label
);
3379 asmfunc_state
= WAITING_ENDASMFUNC
;
3385 /* Can't use symbol_new here, so have to create a symbol and then at
3386 a later date assign it a value. Thats what these functions do. */
3389 symbol_locate (symbolS
* symbolP
,
3390 const char * name
, /* It is copied, the caller can modify. */
3391 segT segment
, /* Segment identifier (SEG_<something>). */
3392 valueT valu
, /* Symbol value. */
3393 fragS
* frag
) /* Associated fragment. */
3396 char * preserved_copy_of_name
;
3398 name_length
= strlen (name
) + 1; /* +1 for \0. */
3399 obstack_grow (¬es
, name
, name_length
);
3400 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3402 #ifdef tc_canonicalize_symbol_name
3403 preserved_copy_of_name
=
3404 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3407 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3409 S_SET_SEGMENT (symbolP
, segment
);
3410 S_SET_VALUE (symbolP
, valu
);
3411 symbol_clear_list_pointers (symbolP
);
3413 symbol_set_frag (symbolP
, frag
);
3415 /* Link to end of symbol chain. */
3417 extern int symbol_table_frozen
;
3419 if (symbol_table_frozen
)
3423 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3425 obj_symbol_new_hook (symbolP
);
3427 #ifdef tc_symbol_new_hook
3428 tc_symbol_new_hook (symbolP
);
3432 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3433 #endif /* DEBUG_SYMS */
3437 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3440 literal_pool
* pool
;
3443 pool
= find_literal_pool ();
3445 || pool
->symbol
== NULL
3446 || pool
->next_free_entry
== 0)
3449 /* Align pool as you have word accesses.
3450 Only make a frag if we have to. */
3452 frag_align (pool
->alignment
, 0, 0);
3454 record_alignment (now_seg
, 2);
3457 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3458 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3460 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3462 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3463 (valueT
) frag_now_fix (), frag_now
);
3464 symbol_table_insert (pool
->symbol
);
3466 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3468 #if defined OBJ_COFF || defined OBJ_ELF
3469 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3472 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3475 if (debug_type
== DEBUG_DWARF2
)
3476 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3478 /* First output the expression in the instruction to the pool. */
3479 emit_expr (&(pool
->literals
[entry
]),
3480 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3483 /* Mark the pool as empty. */
3484 pool
->next_free_entry
= 0;
3485 pool
->symbol
= NULL
;
3489 /* Forward declarations for functions below, in the MD interface
3491 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3492 static valueT
create_unwind_entry (int);
3493 static void start_unwind_section (const segT
, int);
3494 static void add_unwind_opcode (valueT
, int);
3495 static void flush_pending_unwind (void);
3497 /* Directives: Data. */
3500 s_arm_elf_cons (int nbytes
)
3504 #ifdef md_flush_pending_output
3505 md_flush_pending_output ();
3508 if (is_it_end_of_statement ())
3510 demand_empty_rest_of_line ();
3514 #ifdef md_cons_align
3515 md_cons_align (nbytes
);
3518 mapping_state (MAP_DATA
);
3522 char *base
= input_line_pointer
;
3526 if (exp
.X_op
!= O_symbol
)
3527 emit_expr (&exp
, (unsigned int) nbytes
);
3530 char *before_reloc
= input_line_pointer
;
3531 reloc
= parse_reloc (&input_line_pointer
);
3534 as_bad (_("unrecognized relocation suffix"));
3535 ignore_rest_of_line ();
3538 else if (reloc
== BFD_RELOC_UNUSED
)
3539 emit_expr (&exp
, (unsigned int) nbytes
);
3542 reloc_howto_type
*howto
= (reloc_howto_type
*)
3543 bfd_reloc_type_lookup (stdoutput
,
3544 (bfd_reloc_code_real_type
) reloc
);
3545 int size
= bfd_get_reloc_size (howto
);
3547 if (reloc
== BFD_RELOC_ARM_PLT32
)
3549 as_bad (_("(plt) is only valid on branch targets"));
3550 reloc
= BFD_RELOC_UNUSED
;
3555 as_bad (_("%s relocations do not fit in %d bytes"),
3556 howto
->name
, nbytes
);
3559 /* We've parsed an expression stopping at O_symbol.
3560 But there may be more expression left now that we
3561 have parsed the relocation marker. Parse it again.
3562 XXX Surely there is a cleaner way to do this. */
3563 char *p
= input_line_pointer
;
3565 char *save_buf
= (char *) alloca (input_line_pointer
- base
);
3566 memcpy (save_buf
, base
, input_line_pointer
- base
);
3567 memmove (base
+ (input_line_pointer
- before_reloc
),
3568 base
, before_reloc
- base
);
3570 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3572 memcpy (base
, save_buf
, p
- base
);
3574 offset
= nbytes
- size
;
3575 p
= frag_more (nbytes
);
3576 memset (p
, 0, nbytes
);
3577 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3578 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3583 while (*input_line_pointer
++ == ',');
3585 /* Put terminator back into stream. */
3586 input_line_pointer
--;
3587 demand_empty_rest_of_line ();
3590 /* Emit an expression containing a 32-bit thumb instruction.
3591 Implementation based on put_thumb32_insn. */
3594 emit_thumb32_expr (expressionS
* exp
)
3596 expressionS exp_high
= *exp
;
3598 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3599 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3600 exp
->X_add_number
&= 0xffff;
3601 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3604 /* Guess the instruction size based on the opcode. */
3607 thumb_insn_size (int opcode
)
3609 if ((unsigned int) opcode
< 0xe800u
)
3611 else if ((unsigned int) opcode
>= 0xe8000000u
)
3618 emit_insn (expressionS
*exp
, int nbytes
)
3622 if (exp
->X_op
== O_constant
)
3627 size
= thumb_insn_size (exp
->X_add_number
);
3631 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3633 as_bad (_(".inst.n operand too big. "\
3634 "Use .inst.w instead"));
3639 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3640 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3642 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3644 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3645 emit_thumb32_expr (exp
);
3647 emit_expr (exp
, (unsigned int) size
);
3649 it_fsm_post_encode ();
3653 as_bad (_("cannot determine Thumb instruction size. " \
3654 "Use .inst.n/.inst.w instead"));
3657 as_bad (_("constant expression required"));
3662 /* Like s_arm_elf_cons but do not use md_cons_align and
3663 set the mapping state to MAP_ARM/MAP_THUMB. */
3666 s_arm_elf_inst (int nbytes
)
3668 if (is_it_end_of_statement ())
3670 demand_empty_rest_of_line ();
3674 /* Calling mapping_state () here will not change ARM/THUMB,
3675 but will ensure not to be in DATA state. */
3678 mapping_state (MAP_THUMB
);
3683 as_bad (_("width suffixes are invalid in ARM mode"));
3684 ignore_rest_of_line ();
3690 mapping_state (MAP_ARM
);
3699 if (! emit_insn (& exp
, nbytes
))
3701 ignore_rest_of_line ();
3705 while (*input_line_pointer
++ == ',');
3707 /* Put terminator back into stream. */
3708 input_line_pointer
--;
3709 demand_empty_rest_of_line ();
3712 /* Parse a .rel31 directive. */
3715 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3722 if (*input_line_pointer
== '1')
3723 highbit
= 0x80000000;
3724 else if (*input_line_pointer
!= '0')
3725 as_bad (_("expected 0 or 1"));
3727 input_line_pointer
++;
3728 if (*input_line_pointer
!= ',')
3729 as_bad (_("missing comma"));
3730 input_line_pointer
++;
3732 #ifdef md_flush_pending_output
3733 md_flush_pending_output ();
3736 #ifdef md_cons_align
3740 mapping_state (MAP_DATA
);
3745 md_number_to_chars (p
, highbit
, 4);
3746 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3747 BFD_RELOC_ARM_PREL31
);
3749 demand_empty_rest_of_line ();
3752 /* Directives: AEABI stack-unwind tables. */
3754 /* Parse an unwind_fnstart directive. Simply records the current location. */
3757 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3759 demand_empty_rest_of_line ();
3760 if (unwind
.proc_start
)
3762 as_bad (_("duplicate .fnstart directive"));
3766 /* Mark the start of the function. */
3767 unwind
.proc_start
= expr_build_dot ();
3769 /* Reset the rest of the unwind info. */
3770 unwind
.opcode_count
= 0;
3771 unwind
.table_entry
= NULL
;
3772 unwind
.personality_routine
= NULL
;
3773 unwind
.personality_index
= -1;
3774 unwind
.frame_size
= 0;
3775 unwind
.fp_offset
= 0;
3776 unwind
.fp_reg
= REG_SP
;
3778 unwind
.sp_restored
= 0;
3782 /* Parse a handlerdata directive. Creates the exception handling table entry
3783 for the function. */
3786 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3788 demand_empty_rest_of_line ();
3789 if (!unwind
.proc_start
)
3790 as_bad (MISSING_FNSTART
);
3792 if (unwind
.table_entry
)
3793 as_bad (_("duplicate .handlerdata directive"));
3795 create_unwind_entry (1);
3798 /* Parse an unwind_fnend directive. Generates the index table entry. */
3801 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3806 unsigned int marked_pr_dependency
;
3808 demand_empty_rest_of_line ();
3810 if (!unwind
.proc_start
)
3812 as_bad (_(".fnend directive without .fnstart"));
3816 /* Add eh table entry. */
3817 if (unwind
.table_entry
== NULL
)
3818 val
= create_unwind_entry (0);
3822 /* Add index table entry. This is two words. */
3823 start_unwind_section (unwind
.saved_seg
, 1);
3824 frag_align (2, 0, 0);
3825 record_alignment (now_seg
, 2);
3827 ptr
= frag_more (8);
3829 where
= frag_now_fix () - 8;
3831 /* Self relative offset of the function start. */
3832 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3833 BFD_RELOC_ARM_PREL31
);
3835 /* Indicate dependency on EHABI-defined personality routines to the
3836 linker, if it hasn't been done already. */
3837 marked_pr_dependency
3838 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3839 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3840 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3842 static const char *const name
[] =
3844 "__aeabi_unwind_cpp_pr0",
3845 "__aeabi_unwind_cpp_pr1",
3846 "__aeabi_unwind_cpp_pr2"
3848 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3849 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3850 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3851 |= 1 << unwind
.personality_index
;
3855 /* Inline exception table entry. */
3856 md_number_to_chars (ptr
+ 4, val
, 4);
3858 /* Self relative offset of the table entry. */
3859 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3860 BFD_RELOC_ARM_PREL31
);
3862 /* Restore the original section. */
3863 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3865 unwind
.proc_start
= NULL
;
3869 /* Parse an unwind_cantunwind directive. */
3872 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3874 demand_empty_rest_of_line ();
3875 if (!unwind
.proc_start
)
3876 as_bad (MISSING_FNSTART
);
3878 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3879 as_bad (_("personality routine specified for cantunwind frame"));
3881 unwind
.personality_index
= -2;
3885 /* Parse a personalityindex directive. */
3888 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3892 if (!unwind
.proc_start
)
3893 as_bad (MISSING_FNSTART
);
3895 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3896 as_bad (_("duplicate .personalityindex directive"));
3900 if (exp
.X_op
!= O_constant
3901 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3903 as_bad (_("bad personality routine number"));
3904 ignore_rest_of_line ();
3908 unwind
.personality_index
= exp
.X_add_number
;
3910 demand_empty_rest_of_line ();
3914 /* Parse a personality directive. */
3917 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3921 if (!unwind
.proc_start
)
3922 as_bad (MISSING_FNSTART
);
3924 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3925 as_bad (_("duplicate .personality directive"));
3927 name
= input_line_pointer
;
3928 c
= get_symbol_end ();
3929 p
= input_line_pointer
;
3930 unwind
.personality_routine
= symbol_find_or_make (name
);
3932 demand_empty_rest_of_line ();
3936 /* Parse a directive saving core registers. */
3939 s_arm_unwind_save_core (void)
3945 range
= parse_reg_list (&input_line_pointer
);
3948 as_bad (_("expected register list"));
3949 ignore_rest_of_line ();
3953 demand_empty_rest_of_line ();
3955 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3956 into .unwind_save {..., sp...}. We aren't bothered about the value of
3957 ip because it is clobbered by calls. */
3958 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3959 && (range
& 0x3000) == 0x1000)
3961 unwind
.opcode_count
--;
3962 unwind
.sp_restored
= 0;
3963 range
= (range
| 0x2000) & ~0x1000;
3964 unwind
.pending_offset
= 0;
3970 /* See if we can use the short opcodes. These pop a block of up to 8
3971 registers starting with r4, plus maybe r14. */
3972 for (n
= 0; n
< 8; n
++)
3974 /* Break at the first non-saved register. */
3975 if ((range
& (1 << (n
+ 4))) == 0)
3978 /* See if there are any other bits set. */
3979 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3981 /* Use the long form. */
3982 op
= 0x8000 | ((range
>> 4) & 0xfff);
3983 add_unwind_opcode (op
, 2);
3987 /* Use the short form. */
3989 op
= 0xa8; /* Pop r14. */
3991 op
= 0xa0; /* Do not pop r14. */
3993 add_unwind_opcode (op
, 1);
4000 op
= 0xb100 | (range
& 0xf);
4001 add_unwind_opcode (op
, 2);
4004 /* Record the number of bytes pushed. */
4005 for (n
= 0; n
< 16; n
++)
4007 if (range
& (1 << n
))
4008 unwind
.frame_size
+= 4;
4013 /* Parse a directive saving FPA registers. */
4016 s_arm_unwind_save_fpa (int reg
)
4022 /* Get Number of registers to transfer. */
4023 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4026 exp
.X_op
= O_illegal
;
4028 if (exp
.X_op
!= O_constant
)
4030 as_bad (_("expected , <constant>"));
4031 ignore_rest_of_line ();
4035 num_regs
= exp
.X_add_number
;
4037 if (num_regs
< 1 || num_regs
> 4)
4039 as_bad (_("number of registers must be in the range [1:4]"));
4040 ignore_rest_of_line ();
4044 demand_empty_rest_of_line ();
4049 op
= 0xb4 | (num_regs
- 1);
4050 add_unwind_opcode (op
, 1);
4055 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4056 add_unwind_opcode (op
, 2);
4058 unwind
.frame_size
+= num_regs
* 12;
4062 /* Parse a directive saving VFP registers for ARMv6 and above. */
4065 s_arm_unwind_save_vfp_armv6 (void)
4070 int num_vfpv3_regs
= 0;
4071 int num_regs_below_16
;
4073 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4076 as_bad (_("expected register list"));
4077 ignore_rest_of_line ();
4081 demand_empty_rest_of_line ();
4083 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4084 than FSTMX/FLDMX-style ones). */
4086 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4088 num_vfpv3_regs
= count
;
4089 else if (start
+ count
> 16)
4090 num_vfpv3_regs
= start
+ count
- 16;
4092 if (num_vfpv3_regs
> 0)
4094 int start_offset
= start
> 16 ? start
- 16 : 0;
4095 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4096 add_unwind_opcode (op
, 2);
4099 /* Generate opcode for registers numbered in the range 0 .. 15. */
4100 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4101 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4102 if (num_regs_below_16
> 0)
4104 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4105 add_unwind_opcode (op
, 2);
4108 unwind
.frame_size
+= count
* 8;
4112 /* Parse a directive saving VFP registers for pre-ARMv6. */
4115 s_arm_unwind_save_vfp (void)
4121 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4124 as_bad (_("expected register list"));
4125 ignore_rest_of_line ();
4129 demand_empty_rest_of_line ();
4134 op
= 0xb8 | (count
- 1);
4135 add_unwind_opcode (op
, 1);
4140 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4141 add_unwind_opcode (op
, 2);
4143 unwind
.frame_size
+= count
* 8 + 4;
4147 /* Parse a directive saving iWMMXt data registers. */
4150 s_arm_unwind_save_mmxwr (void)
4158 if (*input_line_pointer
== '{')
4159 input_line_pointer
++;
4163 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4167 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4172 as_tsktsk (_("register list not in ascending order"));
4175 if (*input_line_pointer
== '-')
4177 input_line_pointer
++;
4178 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4181 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4184 else if (reg
>= hi_reg
)
4186 as_bad (_("bad register range"));
4189 for (; reg
< hi_reg
; reg
++)
4193 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4195 skip_past_char (&input_line_pointer
, '}');
4197 demand_empty_rest_of_line ();
4199 /* Generate any deferred opcodes because we're going to be looking at
4201 flush_pending_unwind ();
4203 for (i
= 0; i
< 16; i
++)
4205 if (mask
& (1 << i
))
4206 unwind
.frame_size
+= 8;
4209 /* Attempt to combine with a previous opcode. We do this because gcc
4210 likes to output separate unwind directives for a single block of
4212 if (unwind
.opcode_count
> 0)
4214 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4215 if ((i
& 0xf8) == 0xc0)
4218 /* Only merge if the blocks are contiguous. */
4221 if ((mask
& 0xfe00) == (1 << 9))
4223 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4224 unwind
.opcode_count
--;
4227 else if (i
== 6 && unwind
.opcode_count
>= 2)
4229 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4233 op
= 0xffff << (reg
- 1);
4235 && ((mask
& op
) == (1u << (reg
- 1))))
4237 op
= (1 << (reg
+ i
+ 1)) - 1;
4238 op
&= ~((1 << reg
) - 1);
4240 unwind
.opcode_count
-= 2;
4247 /* We want to generate opcodes in the order the registers have been
4248 saved, ie. descending order. */
4249 for (reg
= 15; reg
>= -1; reg
--)
4251 /* Save registers in blocks. */
4253 || !(mask
& (1 << reg
)))
4255 /* We found an unsaved reg. Generate opcodes to save the
4262 op
= 0xc0 | (hi_reg
- 10);
4263 add_unwind_opcode (op
, 1);
4268 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4269 add_unwind_opcode (op
, 2);
4278 ignore_rest_of_line ();
4282 s_arm_unwind_save_mmxwcg (void)
4289 if (*input_line_pointer
== '{')
4290 input_line_pointer
++;
4292 skip_whitespace (input_line_pointer
);
4296 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4300 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4306 as_tsktsk (_("register list not in ascending order"));
4309 if (*input_line_pointer
== '-')
4311 input_line_pointer
++;
4312 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4315 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4318 else if (reg
>= hi_reg
)
4320 as_bad (_("bad register range"));
4323 for (; reg
< hi_reg
; reg
++)
4327 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4329 skip_past_char (&input_line_pointer
, '}');
4331 demand_empty_rest_of_line ();
4333 /* Generate any deferred opcodes because we're going to be looking at
4335 flush_pending_unwind ();
4337 for (reg
= 0; reg
< 16; reg
++)
4339 if (mask
& (1 << reg
))
4340 unwind
.frame_size
+= 4;
4343 add_unwind_opcode (op
, 2);
4346 ignore_rest_of_line ();
4350 /* Parse an unwind_save directive.
4351 If the argument is non-zero, this is a .vsave directive. */
4354 s_arm_unwind_save (int arch_v6
)
4357 struct reg_entry
*reg
;
4358 bfd_boolean had_brace
= FALSE
;
4360 if (!unwind
.proc_start
)
4361 as_bad (MISSING_FNSTART
);
4363 /* Figure out what sort of save we have. */
4364 peek
= input_line_pointer
;
4372 reg
= arm_reg_parse_multi (&peek
);
4376 as_bad (_("register expected"));
4377 ignore_rest_of_line ();
4386 as_bad (_("FPA .unwind_save does not take a register list"));
4387 ignore_rest_of_line ();
4390 input_line_pointer
= peek
;
4391 s_arm_unwind_save_fpa (reg
->number
);
4395 s_arm_unwind_save_core ();
4400 s_arm_unwind_save_vfp_armv6 ();
4402 s_arm_unwind_save_vfp ();
4405 case REG_TYPE_MMXWR
:
4406 s_arm_unwind_save_mmxwr ();
4409 case REG_TYPE_MMXWCG
:
4410 s_arm_unwind_save_mmxwcg ();
4414 as_bad (_(".unwind_save does not support this kind of register"));
4415 ignore_rest_of_line ();
4420 /* Parse an unwind_movsp directive. */
/* .movsp <reg>[, #offset] — tell the unwinder that REG now holds the
   stack pointer value (plus an optional constant OFFSET).
   NOTE(review): this chunk is a lossy extraction; braces, local
   declarations and some statements were dropped, so only the visible
   statements are annotated.  */
4423 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
/* The directive is only meaningful between .fnstart and .fnend.  */
4429 if (!unwind
.proc_start
)
4430 as_bad (MISSING_FNSTART
)
;
/* Parse the register operand (core register).  */
4432 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4435 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4436 ignore_rest_of_line ();
4440 /* Optional constant. */
4441 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4443 if (immediate_for_directive (&offset
) == FAIL
)
4449 demand_empty_rest_of_line ();
/* SP/PC cannot be named as the register holding SP.  */
4451 if (reg
== REG_SP
|| reg
== REG_PC
)
4453 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
/* A second .movsp (fp_reg already changed from SP) is an error.  */
4457 if (unwind
.fp_reg
!= REG_SP
)
4458 as_bad (_("unexpected .unwind_movsp directive"));
4460 /* Generate opcode to restore the value. */
/* NOTE(review): the line computing `op` was dropped by the extraction;
   presumably it encodes the "set vsp = r[reg]" unwind opcode — confirm
   against the full source.  */
4462 add_unwind_opcode (op
, 1);
4464 /* Record the information for later. */
/* fp_reg/fp_offset are used when restoring sp from a frame pointer
   (see the unwind state comments at the top of the file).  */
4465 unwind
.fp_reg
= reg
;
4466 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4467 unwind
.sp_restored
= 1;
4470 /* Parse an unwind_pad directive. */
/* .pad #offset — record a stack adjustment of OFFSET bytes for the
   unwind tables without emitting an opcode yet.
   NOTE(review): lossy extraction; braces/declarations are missing.  */
4473 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
/* Only valid between .fnstart and .fnend.  */
4477 if (!unwind
.proc_start
)
4478 as_bad (MISSING_FNSTART
);
4480 if (immediate_for_directive (&offset
) == FAIL
)
/* The unwind opcodes encode adjustments in words.  */
4485 as_bad (_("stack increment must be multiple of 4"));
4486 ignore_rest_of_line ();
4490 /* Don't generate any opcodes, just record the details for later. */
/* Deferring via pending_offset lets consecutive adjustments be merged
   into a single opcode (see the unwind state comments in the header).  */
4491 unwind
.frame_size
+= offset
;
4492 unwind
.pending_offset
+= offset
;
4494 demand_empty_rest_of_line ();
4497 /* Parse an unwind_setfp directive. */
/* .setfp <fp_reg>, <sp_reg>[, #offset] — declare that FP_REG is now the
   frame pointer, computed from SP_REG plus OFFSET.
   NOTE(review): lossy extraction; braces, declarations and the FAIL
   branch of the comma test are missing.  */
4500 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4506 if (!unwind
.proc_start
)
4507 as_bad (MISSING_FNSTART
);
/* Parse the two mandatory register operands.  */
4509 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4510 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4513 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4515 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4517 as_bad (_("expected <reg>, <reg>"));
4518 ignore_rest_of_line ();
4522 /* Optional constant. */
4523 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4525 if (immediate_for_directive (&offset
) == FAIL
)
4531 demand_empty_rest_of_line ();
/* The base must be SP itself or the register named by an earlier
   .movsp (recorded in unwind.fp_reg).  */
4533 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
/* NOTE(review): the two adjacent string literals below concatenate with
   no separating space, producing "...by a previousunwind_movsp..." in
   the diagnostic.  A doc-only edit cannot change a runtime string; fix
   by adding a space (or "\n ") at the literal boundary.  */
4535 as_bad (_("register must be either sp or set by a previous"
4536 "unwind_movsp directive"));
4540 /* Don't generate any opcodes, just record the information for later. */
4541 unwind
.fp_reg
= fp_reg
;
/* Relative to SP: offset from the current frame size.  Otherwise the
   base is the old frame pointer, so adjust the existing fp_offset.  */
4543 if (sp_reg
== REG_SP
)
4544 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4546 unwind
.fp_offset
-= offset
;
4549 /* Parse an unwind_raw directive. */
4552 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4555 /* This is an arbitrary limit. */
4556 unsigned char op
[16];
4559 if (!unwind
.proc_start
)
4560 as_bad (MISSING_FNSTART
);
4563 if (exp
.X_op
== O_constant
4564 && skip_past_comma (&input_line_pointer
) != FAIL
)
4566 unwind
.frame_size
+= exp
.X_add_number
;
4570 exp
.X_op
= O_illegal
;
4572 if (exp
.X_op
!= O_constant
)
4574 as_bad (_("expected <offset>, <opcode>"));
4575 ignore_rest_of_line ();
4581 /* Parse the opcode. */
4586 as_bad (_("unwind opcode too long"));
4587 ignore_rest_of_line ();
4589 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4591 as_bad (_("invalid unwind opcode"));
4592 ignore_rest_of_line ();
4595 op
[count
++] = exp
.X_add_number
;
4597 /* Parse the next byte. */
4598 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4604 /* Add the opcode bytes in reverse order. */
4606 add_unwind_opcode (op
[count
], 1);
4608 demand_empty_rest_of_line ();
4612 /* Parse a .eabi_attribute directive. */
/* Delegates parsing to the generic ELF vendor-attribute handler for the
   processor-specific attribute section, then remembers which known tags
   the user set explicitly — presumably so later default/derived
   attribute computation does not override them (confirm against the
   attribute-output code).  */
4615 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4617 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
/* Only tags in the known range have an explicit-set flag slot.  */
4619 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4620 attributes_set_explicitly
[tag
] = 1;
4623 /* Emit a tls fix for the symbol. */
4626 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4630 #ifdef md_flush_pending_output
4631 md_flush_pending_output ();
4634 #ifdef md_cons_align
4638 /* Since we're just labelling the code, there's no need to define a
4641 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4642 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4643 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4644 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4646 #endif /* OBJ_ELF */
4648 static void s_arm_arch (int);
4649 static void s_arm_object_arch (int);
4650 static void s_arm_cpu (int);
4651 static void s_arm_fpu (int);
4652 static void s_arm_arch_extension (int);
/* .secrel32 — emit one or more 4-byte section-relative values (PE/COFF
   only; see the "secrel32" entry in md_pseudo_table).  Each
   comma-separated symbol expression is retagged O_secrel so emit_expr
   generates a section-relative relocation.
   NOTE(review): lossy extraction; the loop head and expression() call
   lines are missing.  */
4657 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4664 if (exp
.X_op
== O_symbol
)
4665 exp
.X_op
= O_secrel
;
/* Emit as a 32-bit quantity.  */
4667 emit_expr (&exp
, 4);
/* Loop while expressions are separated by commas; the post-increment
   overshoots by one character, undone below.  */
4669 while (*input_line_pointer
++ == ',');
4671 input_line_pointer
--;
4672 demand_empty_rest_of_line ();
4676 /* This table describes all the machine specific pseudo-ops the assembler
4677 has to support. The fields are:
4678 pseudo-op name without dot
4679 function to call to execute this pseudo-op
4680 Integer arg to pass to the function. */
4682 const pseudo_typeS md_pseudo_table
[] =
4684 /* Never called because '.req' does not start a line. */
4685 { "req", s_req
, 0 },
4686 /* Following two are likewise never called. */
4689 { "unreq", s_unreq
, 0 },
4690 { "bss", s_bss
, 0 },
4691 { "align", s_align
, 0 },
4692 { "arm", s_arm
, 0 },
4693 { "thumb", s_thumb
, 0 },
4694 { "code", s_code
, 0 },
4695 { "force_thumb", s_force_thumb
, 0 },
4696 { "thumb_func", s_thumb_func
, 0 },
4697 { "thumb_set", s_thumb_set
, 0 },
4698 { "even", s_even
, 0 },
4699 { "ltorg", s_ltorg
, 0 },
4700 { "pool", s_ltorg
, 0 },
4701 { "syntax", s_syntax
, 0 },
4702 { "cpu", s_arm_cpu
, 0 },
4703 { "arch", s_arm_arch
, 0 },
4704 { "object_arch", s_arm_object_arch
, 0 },
4705 { "fpu", s_arm_fpu
, 0 },
4706 { "arch_extension", s_arm_arch_extension
, 0 },
4708 { "word", s_arm_elf_cons
, 4 },
4709 { "long", s_arm_elf_cons
, 4 },
4710 { "inst.n", s_arm_elf_inst
, 2 },
4711 { "inst.w", s_arm_elf_inst
, 4 },
4712 { "inst", s_arm_elf_inst
, 0 },
4713 { "rel31", s_arm_rel31
, 0 },
4714 { "fnstart", s_arm_unwind_fnstart
, 0 },
4715 { "fnend", s_arm_unwind_fnend
, 0 },
4716 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4717 { "personality", s_arm_unwind_personality
, 0 },
4718 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4719 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4720 { "save", s_arm_unwind_save
, 0 },
4721 { "vsave", s_arm_unwind_save
, 1 },
4722 { "movsp", s_arm_unwind_movsp
, 0 },
4723 { "pad", s_arm_unwind_pad
, 0 },
4724 { "setfp", s_arm_unwind_setfp
, 0 },
4725 { "unwind_raw", s_arm_unwind_raw
, 0 },
4726 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4727 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4731 /* These are used for dwarf. */
4735 /* These are used for dwarf2. */
4736 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4737 { "loc", dwarf2_directive_loc
, 0 },
4738 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4740 { "extend", float_cons
, 'x' },
4741 { "ldouble", float_cons
, 'x' },
4742 { "packed", float_cons
, 'p' },
4744 {"secrel32", pe_directive_secrel
, 0},
4747 /* These are for compatibility with CodeComposer Studio. */
4748 {"ref", s_ccs_ref
, 0},
4749 {"def", s_ccs_def
, 0},
4750 {"asmfunc", s_ccs_asmfunc
, 0},
4751 {"endasmfunc", s_ccs_endasmfunc
, 0},
4756 /* Parser functions used exclusively in instruction operands. */
4758 /* Generic immediate-value read function for use in insn parsing.
4759 STR points to the beginning of the immediate (the leading #);
4760 VAL receives the value; if the value is outside [MIN, MAX]
4761 issue an error. PREFIX_OPT is true if the immediate prefix is
/* ...optional (original comment truncated by extraction).  Returns
   SUCCESS/FAIL — presumably; the return statements were dropped.  On
   error, inst.error is set and *VAL is left unwritten.  */
4765 parse_immediate (char **str
, int *val
, int min
, int max
,
4766 bfd_boolean prefix_opt
)
/* Parse the expression, requiring or merely allowing the '#' prefix
   depending on PREFIX_OPT.  */
4769 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
/* Immediates must resolve to a compile-time constant.  */
4770 if (exp
.X_op
!= O_constant
)
4772 inst
.error
= _("constant expression required");
/* Range-check against the caller-supplied inclusive bounds.  */
4776 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4778 inst
.error
= _("immediate value out of range");
4782 *val
= exp
.X_add_number
;
4786 /* Less-generic immediate-value read function with the possibility of loading a
4787 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4788 instructions. Puts the result directly in inst.operands[i]. */
4791 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4792 bfd_boolean allow_symbol_p
)
4795 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4798 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4800 if (exp_p
->X_op
== O_constant
)
4802 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4803 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4804 O_constant. We have to be careful not to break compilation for
4805 32-bit X_add_number, though. */
4806 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4808 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4809 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4811 inst
.operands
[i
].regisimm
= 1;
4814 else if (exp_p
->X_op
== O_big
4815 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4817 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4819 /* Bignums have their least significant bits in
4820 generic_bignum[0]. Make sure we put 32 bits in imm and
4821 32 bits in reg, in a (hopefully) portable way. */
4822 gas_assert (parts
!= 0);
4824 /* Make sure that the number is not too big.
4825 PR 11972: Bignums can now be sign-extended to the
4826 size of a .octa so check that the out of range bits
4827 are all zero or all one. */
4828 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4830 LITTLENUM_TYPE m
= -1;
4832 if (generic_bignum
[parts
* 2] != 0
4833 && generic_bignum
[parts
* 2] != m
)
4836 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4837 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4841 inst
.operands
[i
].imm
= 0;
4842 for (j
= 0; j
< parts
; j
++, idx
++)
4843 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4844 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4845 inst
.operands
[i
].reg
= 0;
4846 for (j
= 0; j
< parts
; j
++, idx
++)
4847 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4848 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4849 inst
.operands
[i
].regisimm
= 1;
4851 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4859 /* Returns the pseudo-register number of an FPA immediate constant,
4860 or FAIL if there isn't a valid constant here. */
4863 parse_fpa_immediate (char ** str
)
4865 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4871 /* First try and match exact strings, this is to guarantee
4872 that some formats will work even for cross assembly. */
4874 for (i
= 0; fp_const
[i
]; i
++)
4876 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4880 *str
+= strlen (fp_const
[i
]);
4881 if (is_end_of_line
[(unsigned char) **str
])
4887 /* Just because we didn't get a match doesn't mean that the constant
4888 isn't valid, just that it is in a format that we don't
4889 automatically recognize. Try parsing it with the standard
4890 expression routines. */
4892 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4894 /* Look for a raw floating point number. */
4895 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4896 && is_end_of_line
[(unsigned char) *save_in
])
4898 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4900 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4902 if (words
[j
] != fp_values
[i
][j
])
4906 if (j
== MAX_LITTLENUMS
)
4914 /* Try and parse a more complex expression, this will probably fail
4915 unless the code uses a floating point prefix (eg "0f"). */
4916 save_in
= input_line_pointer
;
4917 input_line_pointer
= *str
;
4918 if (expression (&exp
) == absolute_section
4919 && exp
.X_op
== O_big
4920 && exp
.X_add_number
< 0)
4922 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4924 if (gen_to_words (words
, 5, (long) 15) == 0)
4926 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4928 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4930 if (words
[j
] != fp_values
[i
][j
])
4934 if (j
== MAX_LITTLENUMS
)
4936 *str
= input_line_pointer
;
4937 input_line_pointer
= save_in
;
4944 *str
= input_line_pointer
;
4945 input_line_pointer
= save_in
;
4946 inst
.error
= _("invalid FPA immediate expression");
4950 /* Returns 1 if a number has "quarter-precision" float format
4951 0baBbbbbbc defgh000 00000000 00000000. */
4954 is_quarter_float (unsigned imm
)
4956 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
4957 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
4961 /* Detect the presence of a floating point or integer zero constant,
4965 parse_ifimm_zero (char **in
)
4969 if (!is_immediate_prefix (**in
))
4974 /* Accept #0x0 as a synonym for #0. */
4975 if (strncmp (*in
, "0x", 2) == 0)
4978 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
4983 error_code
= atof_generic (in
, ".", EXP_CHARS
,
4984 &generic_floating_point_number
);
4987 && generic_floating_point_number
.sign
== '+'
4988 && (generic_floating_point_number
.low
4989 > generic_floating_point_number
.leader
))
4995 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4996 0baBbbbbbc defgh000 00000000 00000000.
4997 The zero and minus-zero cases need special handling, since they can't be
4998 encoded in the "quarter-precision" float format, but can nonetheless be
4999 loaded as integer constants. */
5002 parse_qfloat_immediate (char **ccp
, int *immed
)
5006 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5007 int found_fpchar
= 0;
5009 skip_past_char (&str
, '#');
5011 /* We must not accidentally parse an integer as a floating-point number. Make
5012 sure that the value we parse is not an integer by checking for special
5013 characters '.' or 'e'.
5014 FIXME: This is a horrible hack, but doing better is tricky because type
5015 information isn't in a very usable state at parse time. */
5017 skip_whitespace (fpnum
);
5019 if (strncmp (fpnum
, "0x", 2) == 0)
5023 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5024 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5034 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5036 unsigned fpword
= 0;
5039 /* Our FP word must be 32 bits (single-precision FP). */
5040 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5042 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5046 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5059 /* Shift operands. */
5062 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5065 struct asm_shift_name
5068 enum shift_kind kind
;
5071 /* Third argument to parse_shift. */
5072 enum parse_shift_mode
5074 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5075 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5076 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5077 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5078 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5081 /* Parse a <shift> specifier on an ARM data processing instruction.
5082 This has three forms:
5084 (LSL|LSR|ASL|ASR|ROR) Rs
5085 (LSL|LSR|ASL|ASR|ROR) #imm
5088 Note that ASL is assimilated to LSL in the instruction encoding, and
5089 RRX to ROR #0 (which cannot be written as such). */
5092 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5094 const struct asm_shift_name
*shift_name
;
5095 enum shift_kind shift
;
5100 for (p
= *str
; ISALPHA (*p
); p
++)
5105 inst
.error
= _("shift expression expected");
5109 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5112 if (shift_name
== NULL
)
5114 inst
.error
= _("shift expression expected");
5118 shift
= shift_name
->kind
;
5122 case NO_SHIFT_RESTRICT
:
5123 case SHIFT_IMMEDIATE
: break;
5125 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5126 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5128 inst
.error
= _("'LSL' or 'ASR' required");
5133 case SHIFT_LSL_IMMEDIATE
:
5134 if (shift
!= SHIFT_LSL
)
5136 inst
.error
= _("'LSL' required");
5141 case SHIFT_ASR_IMMEDIATE
:
5142 if (shift
!= SHIFT_ASR
)
5144 inst
.error
= _("'ASR' required");
5152 if (shift
!= SHIFT_RRX
)
5154 /* Whitespace can appear here if the next thing is a bare digit. */
5155 skip_whitespace (p
);
5157 if (mode
== NO_SHIFT_RESTRICT
5158 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5160 inst
.operands
[i
].imm
= reg
;
5161 inst
.operands
[i
].immisreg
= 1;
5163 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5166 inst
.operands
[i
].shift_kind
= shift
;
5167 inst
.operands
[i
].shifted
= 1;
5172 /* Parse a <shifter_operand> for an ARM data processing instruction:
5175 #<immediate>, <rotate>
5179 where <shift> is defined by parse_shift above, and <rotate> is a
5180 multiple of 2 between 0 and 30. Validation of immediate operands
5181 is deferred to md_apply_fix. */
5184 parse_shifter_operand (char **str
, int i
)
5189 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5191 inst
.operands
[i
].reg
= value
;
5192 inst
.operands
[i
].isreg
= 1;
5194 /* parse_shift will override this if appropriate */
5195 inst
.reloc
.exp
.X_op
= O_constant
;
5196 inst
.reloc
.exp
.X_add_number
= 0;
5198 if (skip_past_comma (str
) == FAIL
)
5201 /* Shift operation on register. */
5202 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5205 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
5208 if (skip_past_comma (str
) == SUCCESS
)
5210 /* #x, y -- ie explicit rotation by Y. */
5211 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5214 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
5216 inst
.error
= _("constant expression expected");
5220 value
= exp
.X_add_number
;
5221 if (value
< 0 || value
> 30 || value
% 2 != 0)
5223 inst
.error
= _("invalid rotation");
5226 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
5228 inst
.error
= _("invalid constant");
5232 /* Encode as specified. */
5233 inst
.operands
[i
].imm
= inst
.reloc
.exp
.X_add_number
| value
<< 7;
5237 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5238 inst
.reloc
.pc_rel
= 0;
5242 /* Group relocation information. Each entry in the table contains the
5243 textual name of the relocation as may appear in assembler source
5244 and must end with a colon.
5245 Along with this textual name are the relocation codes to be used if
5246 the corresponding instruction is an ALU instruction (ADD or SUB only),
5247 an LDR, an LDRS, or an LDC. */
5249 struct group_reloc_table_entry
5260 /* Varieties of non-ALU group relocation. */
5267 static struct group_reloc_table_entry group_reloc_table
[] =
5268 { /* Program counter relative: */
5270 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5275 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5276 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5277 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5278 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5280 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5285 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5286 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5287 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5288 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5290 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5291 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5292 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5293 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5294 /* Section base relative */
5296 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5301 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5302 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5303 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5304 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5306 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5311 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5312 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5313 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5314 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5316 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5317 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5318 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5319 BFD_RELOC_ARM_LDC_SB_G2
} }; /* LDC */
5321 /* Given the address of a pointer pointing to the textual name of a group
5322 relocation as may appear in assembler source, attempt to find its details
5323 in group_reloc_table. The pointer will be updated to the character after
5324 the trailing colon. On failure, FAIL will be returned; SUCCESS
5325 otherwise. On success, *entry will be updated to point at the relevant
5326 group_reloc_table entry. */
/* NOTE(review): lossy extraction — the return statements and braces are
   missing from this view; behavior is as described by the comment above.  */
5329 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
/* Linear scan: the table is small, so no hashing is needed.  */
5332 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5334 int length
= strlen (group_reloc_table
[i
].name
);
/* Case-insensitive prefix match, and the name must be terminated by the
   ':' that group-relocation syntax requires.  */
5336 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5337 && (*str
)[length
] == ':')
5339 *out
= &group_reloc_table
[i
];
/* Advance past the name and its trailing colon.  */
5340 *str
+= (length
+ 1);
5348 /* Parse a <shifter_operand> for an ARM data processing instruction
5349 (as for parse_shifter_operand) where group relocations are allowed:
5352 #<immediate>, <rotate>
5353 #:<group_reloc>:<expression>
5357 where <group_reloc> is one of the strings defined in group_reloc_table.
5358 The hashes are optional.
5360 Everything else is as for parse_shifter_operand. */
5362 static parse_operand_result
5363 parse_shifter_operand_group_reloc (char **str
, int i
)
5365 /* Determine if we have the sequence of characters #: or just :
5366 coming next. If we do, then we check for a group relocation.
5367 If we don't, punt the whole lot to parse_shifter_operand. */
5369 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5370 || (*str
)[0] == ':')
5372 struct group_reloc_table_entry
*entry
;
5374 if ((*str
)[0] == '#')
5379 /* Try to parse a group relocation. Anything else is an error. */
5380 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5382 inst
.error
= _("unknown group relocation");
5383 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5386 /* We now have the group relocation table entry corresponding to
5387 the name in the assembler source. Next, we parse the expression. */
5388 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
5389 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5391 /* Record the relocation type (always the ALU variant here). */
5392 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5393 gas_assert (inst
.reloc
.type
!= 0);
5395 return PARSE_OPERAND_SUCCESS
;
5398 return parse_shifter_operand (str
, i
) == SUCCESS
5399 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5401 /* Never reached. */
5404 /* Parse a Neon alignment expression. Information is written to
5405 inst.operands[i]. We assume the initial ':' has been skipped.
5407 align .imm = align << 8, .immisalign=1, .preind=0 */
5408 static parse_operand_result
5409 parse_neon_alignment (char **str
, int i
)
/* Parse the alignment value; no '#' prefix is accepted here.
   NOTE(review): lossy extraction — declarations of the local cursor and
   expression, and the final *str write-back line, are missing.  */
5414 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
/* Alignment must be a compile-time constant.  */
5416 if (exp
.X_op
!= O_constant
)
5418 inst
.error
= _("alignment must be constant");
5419 return PARSE_OPERAND_FAIL
;
/* Store as align << 8, per the contract in the header comment above.  */
5422 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5423 inst
.operands
[i
].immisalign
= 1;
5424 /* Alignments are not pre-indexes. */
5425 inst
.operands
[i
].preind
= 0;
5428 return PARSE_OPERAND_SUCCESS
;
5431 /* Parse all forms of an ARM address expression. Information is written
5432 to inst.operands[i] and/or inst.reloc.
5434 Preindexed addressing (.preind=1):
5436 [Rn, #offset] .reg=Rn .reloc.exp=offset
5437 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5438 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5439 .shift_kind=shift .reloc.exp=shift_imm
5441 These three may have a trailing ! which causes .writeback to be set also.
5443 Postindexed addressing (.postind=1, .writeback=1):
5445 [Rn], #offset .reg=Rn .reloc.exp=offset
5446 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5447 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5448 .shift_kind=shift .reloc.exp=shift_imm
5450 Unindexed addressing (.preind=0, .postind=0):
5452 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5456 [Rn]{!} shorthand for [Rn,#0]{!}
5457 =immediate .isreg=0 .reloc.exp=immediate
5458 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5460 It is the caller's responsibility to check for addressing modes not
5461 supported by the instruction, and to set inst.reloc.type. */
5463 static parse_operand_result
5464 parse_address_main (char **str
, int i
, int group_relocations
,
5465 group_reloc_type group_type
)
5470 if (skip_past_char (&p
, '[') == FAIL
)
5472 if (skip_past_char (&p
, '=') == FAIL
)
5474 /* Bare address - translate to PC-relative offset. */
5475 inst
.reloc
.pc_rel
= 1;
5476 inst
.operands
[i
].reg
= REG_PC
;
5477 inst
.operands
[i
].isreg
= 1;
5478 inst
.operands
[i
].preind
= 1;
5480 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_OPT_PREFIX_BIG
))
5481 return PARSE_OPERAND_FAIL
;
5483 else if (parse_big_immediate (&p
, i
, &inst
.reloc
.exp
,
5484 /*allow_symbol_p=*/TRUE
))
5485 return PARSE_OPERAND_FAIL
;
5488 return PARSE_OPERAND_SUCCESS
;
5491 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5492 skip_whitespace (p
);
5494 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5496 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5497 return PARSE_OPERAND_FAIL
;
5499 inst
.operands
[i
].reg
= reg
;
5500 inst
.operands
[i
].isreg
= 1;
5502 if (skip_past_comma (&p
) == SUCCESS
)
5504 inst
.operands
[i
].preind
= 1;
5507 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5509 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5511 inst
.operands
[i
].imm
= reg
;
5512 inst
.operands
[i
].immisreg
= 1;
5514 if (skip_past_comma (&p
) == SUCCESS
)
5515 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5516 return PARSE_OPERAND_FAIL
;
5518 else if (skip_past_char (&p
, ':') == SUCCESS
)
5520 /* FIXME: '@' should be used here, but it's filtered out by generic
5521 code before we get to see it here. This may be subject to
5523 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5525 if (result
!= PARSE_OPERAND_SUCCESS
)
5530 if (inst
.operands
[i
].negative
)
5532 inst
.operands
[i
].negative
= 0;
5536 if (group_relocations
5537 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5539 struct group_reloc_table_entry
*entry
;
5541 /* Skip over the #: or : sequence. */
5547 /* Try to parse a group relocation. Anything else is an
5549 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5551 inst
.error
= _("unknown group relocation");
5552 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5555 /* We now have the group relocation table entry corresponding to
5556 the name in the assembler source. Next, we parse the
5558 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5559 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5561 /* Record the relocation type. */
5565 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5569 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5573 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5580 if (inst
.reloc
.type
== 0)
5582 inst
.error
= _("this group relocation is not allowed on this instruction");
5583 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5589 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5590 return PARSE_OPERAND_FAIL
;
5591 /* If the offset is 0, find out if it's a +0 or -0. */
5592 if (inst
.reloc
.exp
.X_op
== O_constant
5593 && inst
.reloc
.exp
.X_add_number
== 0)
5595 skip_whitespace (q
);
5599 skip_whitespace (q
);
5602 inst
.operands
[i
].negative
= 1;
5607 else if (skip_past_char (&p
, ':') == SUCCESS
)
5609 /* FIXME: '@' should be used here, but it's filtered out by generic code
5610 before we get to see it here. This may be subject to change. */
5611 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5613 if (result
!= PARSE_OPERAND_SUCCESS
)
5617 if (skip_past_char (&p
, ']') == FAIL
)
5619 inst
.error
= _("']' expected");
5620 return PARSE_OPERAND_FAIL
;
5623 if (skip_past_char (&p
, '!') == SUCCESS
)
5624 inst
.operands
[i
].writeback
= 1;
5626 else if (skip_past_comma (&p
) == SUCCESS
)
5628 if (skip_past_char (&p
, '{') == SUCCESS
)
5630 /* [Rn], {expr} - unindexed, with option */
5631 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5632 0, 255, TRUE
) == FAIL
)
5633 return PARSE_OPERAND_FAIL
;
5635 if (skip_past_char (&p
, '}') == FAIL
)
5637 inst
.error
= _("'}' expected at end of 'option' field");
5638 return PARSE_OPERAND_FAIL
;
5640 if (inst
.operands
[i
].preind
)
5642 inst
.error
= _("cannot combine index with option");
5643 return PARSE_OPERAND_FAIL
;
5646 return PARSE_OPERAND_SUCCESS
;
5650 inst
.operands
[i
].postind
= 1;
5651 inst
.operands
[i
].writeback
= 1;
5653 if (inst
.operands
[i
].preind
)
5655 inst
.error
= _("cannot combine pre- and post-indexing");
5656 return PARSE_OPERAND_FAIL
;
5660 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5662 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5664 /* We might be using the immediate for alignment already. If we
5665 are, OR the register number into the low-order bits. */
5666 if (inst
.operands
[i
].immisalign
)
5667 inst
.operands
[i
].imm
|= reg
;
5669 inst
.operands
[i
].imm
= reg
;
5670 inst
.operands
[i
].immisreg
= 1;
5672 if (skip_past_comma (&p
) == SUCCESS
)
5673 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5674 return PARSE_OPERAND_FAIL
;
5679 if (inst
.operands
[i
].negative
)
5681 inst
.operands
[i
].negative
= 0;
5684 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5685 return PARSE_OPERAND_FAIL
;
5686 /* If the offset is 0, find out if it's a +0 or -0. */
5687 if (inst
.reloc
.exp
.X_op
== O_constant
5688 && inst
.reloc
.exp
.X_add_number
== 0)
5690 skip_whitespace (q
);
5694 skip_whitespace (q
);
5697 inst
.operands
[i
].negative
= 1;
5703 /* If at this point neither .preind nor .postind is set, we have a
5704 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5705 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5707 inst
.operands
[i
].preind
= 1;
5708 inst
.reloc
.exp
.X_op
= O_constant
;
5709 inst
.reloc
.exp
.X_add_number
= 0;
5712 return PARSE_OPERAND_SUCCESS
;
/* Convenience wrapper: parse an address operand into inst.operands[i]
   with group relocations disabled, collapsing the tri-state
   parse_operand_result down to a plain SUCCESS/FAIL-style value.
   (GROUP_LDR is passed only to satisfy the signature; it is unused when
   group relocations are off.)  */
5716 parse_address (char **str
, int i
)
5718 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
/* As parse_address, but with group relocations enabled and the caller's
   requested group relocation variety (LDR/LDRS/LDC) passed through.
   Returns the full tri-state parse_operand_result so NO_BACKTRACK
   failures propagate.  */
5722 static parse_operand_result
5723 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5725 return parse_address_main (str
, i
, 1, type
);
5728 /* Parse an operand for a MOVW or MOVT instruction. */
5730 parse_half (char **str
)
5735 skip_past_char (&p
, '#');
5736 if (strncasecmp (p
, ":lower16:", 9) == 0)
5737 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5738 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5739 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5741 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5744 skip_whitespace (p
);
5747 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5750 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5752 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5754 inst
.error
= _("constant expression expected");
5757 if (inst
.reloc
.exp
.X_add_number
< 0
5758 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5760 inst
.error
= _("immediate value out of range");
5768 /* Miscellaneous. */
5770 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5771 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5773 parse_psr (char **str
, bfd_boolean lhs
)
5776 unsigned long psr_field
;
5777 const struct asm_psr
*psr
;
5779 bfd_boolean is_apsr
= FALSE
;
5780 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5782 /* PR gas/12698: If the user has specified -march=all then m_profile will
5783 be TRUE, but we want to ignore it in this case as we are building for any
5784 CPU type, including non-m variants. */
5785 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5788 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5789 feature for ease of use and backwards compatibility. */
5791 if (strncasecmp (p
, "SPSR", 4) == 0)
5794 goto unsupported_psr
;
5796 psr_field
= SPSR_BIT
;
5798 else if (strncasecmp (p
, "CPSR", 4) == 0)
5801 goto unsupported_psr
;
5805 else if (strncasecmp (p
, "APSR", 4) == 0)
5807 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5808 and ARMv7-R architecture CPUs. */
5817 while (ISALNUM (*p
) || *p
== '_');
5819 if (strncasecmp (start
, "iapsr", 5) == 0
5820 || strncasecmp (start
, "eapsr", 5) == 0
5821 || strncasecmp (start
, "xpsr", 4) == 0
5822 || strncasecmp (start
, "psr", 3) == 0)
5823 p
= start
+ strcspn (start
, "rR") + 1;
5825 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5831 /* If APSR is being written, a bitfield may be specified. Note that
5832 APSR itself is handled above. */
5833 if (psr
->field
<= 3)
5835 psr_field
= psr
->field
;
5841 /* M-profile MSR instructions have the mask field set to "10", except
5842 *PSR variants which modify APSR, which may use a different mask (and
5843 have been handled already). Do that by setting the PSR_f field
5845 return psr
->field
| (lhs
? PSR_f
: 0);
5848 goto unsupported_psr
;
5854 /* A suffix follows. */
5860 while (ISALNUM (*p
) || *p
== '_');
5864 /* APSR uses a notation for bits, rather than fields. */
5865 unsigned int nzcvq_bits
= 0;
5866 unsigned int g_bit
= 0;
5869 for (bit
= start
; bit
!= p
; bit
++)
5871 switch (TOLOWER (*bit
))
5874 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5878 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5882 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5886 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
5890 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
5894 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
5898 inst
.error
= _("unexpected bit specified after APSR");
5903 if (nzcvq_bits
== 0x1f)
5908 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
5910 inst
.error
= _("selected processor does not "
5911 "support DSP extension");
5918 if ((nzcvq_bits
& 0x20) != 0
5919 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
5920 || (g_bit
& 0x2) != 0)
5922 inst
.error
= _("bad bitmask specified after APSR");
5928 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5933 psr_field
|= psr
->field
;
5939 goto error
; /* Garbage after "[CS]PSR". */
5941 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5942 is deprecated, but allow it anyway. */
5946 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5949 else if (!m_profile
)
5950 /* These bits are never right for M-profile devices: don't set them
5951 (only code paths which read/write APSR reach here). */
5952 psr_field
|= (PSR_c
| PSR_f
);
5958 inst
.error
= _("selected processor does not support requested special "
5959 "purpose register");
5963 inst
.error
= _("flag for {c}psr instruction expected");
5967 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5968 value suitable for splatting into the AIF field of the instruction. */
5971 parse_cps_flags (char **str
)
5980 case '\0': case ',':
5983 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
5984 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
5985 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
5988 inst
.error
= _("unrecognized CPS flag");
5993 if (saw_a_flag
== 0)
5995 inst
.error
= _("missing CPS flags");
6003 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6004 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6007 parse_endian_specifier (char **str
)
6012 if (strncasecmp (s
, "BE", 2))
6014 else if (strncasecmp (s
, "LE", 2))
6018 inst
.error
= _("valid endian specifiers are be or le");
6022 if (ISALNUM (s
[2]) || s
[2] == '_')
6024 inst
.error
= _("valid endian specifiers are be or le");
6029 return little_endian
;
6032 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6033 value suitable for poking into the rotate field of an sxt or sxta
6034 instruction, or FAIL on error. */
6037 parse_ror (char **str
)
6042 if (strncasecmp (s
, "ROR", 3) == 0)
6046 inst
.error
= _("missing rotation field after comma");
6050 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6055 case 0: *str
= s
; return 0x0;
6056 case 8: *str
= s
; return 0x1;
6057 case 16: *str
= s
; return 0x2;
6058 case 24: *str
= s
; return 0x3;
6061 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6066 /* Parse a conditional code (from conds[] below). The value returned is in the
6067 range 0 .. 14, or FAIL. */
6069 parse_cond (char **str
)
6072 const struct asm_cond
*c
;
6074 /* Condition codes are always 2 characters, so matching up to
6075 3 characters is sufficient. */
6080 while (ISALPHA (*q
) && n
< 3)
6082 cond
[n
] = TOLOWER (*q
);
6087 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6090 inst
.error
= _("condition required");
6098 /* If the given feature available in the selected CPU, mark it as used.
6099 Returns TRUE iff feature is available. */
6101 mark_feature_used (const arm_feature_set
*feature
)
6103 /* Ensure the option is valid on the current architecture. */
6104 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6107 /* Add the appropriate architecture feature for the barrier option used.
6110 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6112 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6117 /* Parse an option for a barrier instruction. Returns the encoding for the
6120 parse_barrier (char **str
)
6123 const struct asm_barrier_opt
*o
;
6126 while (ISALPHA (*q
))
6129 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6134 if (!mark_feature_used (&o
->arch
))
6141 /* Parse the operands of a table branch instruction. Similar to a memory
6144 parse_tb (char **str
)
6149 if (skip_past_char (&p
, '[') == FAIL
)
6151 inst
.error
= _("'[' expected");
6155 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6157 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6160 inst
.operands
[0].reg
= reg
;
6162 if (skip_past_comma (&p
) == FAIL
)
6164 inst
.error
= _("',' expected");
6168 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6170 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6173 inst
.operands
[0].imm
= reg
;
6175 if (skip_past_comma (&p
) == SUCCESS
)
6177 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6179 if (inst
.reloc
.exp
.X_add_number
!= 1)
6181 inst
.error
= _("invalid shift");
6184 inst
.operands
[0].shifted
= 1;
6187 if (skip_past_char (&p
, ']') == FAIL
)
6189 inst
.error
= _("']' expected");
6196 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6197 information on the types the operands can take and how they are encoded.
6198 Up to four operands may be read; this function handles setting the
6199 ".present" field for each read operand itself.
6200 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6201 else returns FAIL. */
6204 parse_neon_mov (char **str
, int *which_operand
)
6206 int i
= *which_operand
, val
;
6207 enum arm_reg_type rtype
;
6209 struct neon_type_el optype
;
6211 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6213 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6214 inst
.operands
[i
].reg
= val
;
6215 inst
.operands
[i
].isscalar
= 1;
6216 inst
.operands
[i
].vectype
= optype
;
6217 inst
.operands
[i
++].present
= 1;
6219 if (skip_past_comma (&ptr
) == FAIL
)
6222 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6225 inst
.operands
[i
].reg
= val
;
6226 inst
.operands
[i
].isreg
= 1;
6227 inst
.operands
[i
].present
= 1;
6229 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6232 /* Cases 0, 1, 2, 3, 5 (D only). */
6233 if (skip_past_comma (&ptr
) == FAIL
)
6236 inst
.operands
[i
].reg
= val
;
6237 inst
.operands
[i
].isreg
= 1;
6238 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6239 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6240 inst
.operands
[i
].isvec
= 1;
6241 inst
.operands
[i
].vectype
= optype
;
6242 inst
.operands
[i
++].present
= 1;
6244 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6246 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6247 Case 13: VMOV <Sd>, <Rm> */
6248 inst
.operands
[i
].reg
= val
;
6249 inst
.operands
[i
].isreg
= 1;
6250 inst
.operands
[i
].present
= 1;
6252 if (rtype
== REG_TYPE_NQ
)
6254 first_error (_("can't use Neon quad register here"));
6257 else if (rtype
!= REG_TYPE_VFS
)
6260 if (skip_past_comma (&ptr
) == FAIL
)
6262 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6264 inst
.operands
[i
].reg
= val
;
6265 inst
.operands
[i
].isreg
= 1;
6266 inst
.operands
[i
].present
= 1;
6269 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6272 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6273 Case 1: VMOV<c><q> <Dd>, <Dm>
6274 Case 8: VMOV.F32 <Sd>, <Sm>
6275 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6277 inst
.operands
[i
].reg
= val
;
6278 inst
.operands
[i
].isreg
= 1;
6279 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6280 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6281 inst
.operands
[i
].isvec
= 1;
6282 inst
.operands
[i
].vectype
= optype
;
6283 inst
.operands
[i
].present
= 1;
6285 if (skip_past_comma (&ptr
) == SUCCESS
)
6290 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6293 inst
.operands
[i
].reg
= val
;
6294 inst
.operands
[i
].isreg
= 1;
6295 inst
.operands
[i
++].present
= 1;
6297 if (skip_past_comma (&ptr
) == FAIL
)
6300 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6303 inst
.operands
[i
].reg
= val
;
6304 inst
.operands
[i
].isreg
= 1;
6305 inst
.operands
[i
].present
= 1;
6308 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6309 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6310 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6311 Case 10: VMOV.F32 <Sd>, #<imm>
6312 Case 11: VMOV.F64 <Dd>, #<imm> */
6313 inst
.operands
[i
].immisfloat
= 1;
6314 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6316 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6317 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6321 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6325 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6328 inst
.operands
[i
].reg
= val
;
6329 inst
.operands
[i
].isreg
= 1;
6330 inst
.operands
[i
++].present
= 1;
6332 if (skip_past_comma (&ptr
) == FAIL
)
6335 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6337 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6338 inst
.operands
[i
].reg
= val
;
6339 inst
.operands
[i
].isscalar
= 1;
6340 inst
.operands
[i
].present
= 1;
6341 inst
.operands
[i
].vectype
= optype
;
6343 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6345 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6346 inst
.operands
[i
].reg
= val
;
6347 inst
.operands
[i
].isreg
= 1;
6348 inst
.operands
[i
++].present
= 1;
6350 if (skip_past_comma (&ptr
) == FAIL
)
6353 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6356 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6360 inst
.operands
[i
].reg
= val
;
6361 inst
.operands
[i
].isreg
= 1;
6362 inst
.operands
[i
].isvec
= 1;
6363 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6364 inst
.operands
[i
].vectype
= optype
;
6365 inst
.operands
[i
].present
= 1;
6367 if (rtype
== REG_TYPE_VFS
)
6371 if (skip_past_comma (&ptr
) == FAIL
)
6373 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6376 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6379 inst
.operands
[i
].reg
= val
;
6380 inst
.operands
[i
].isreg
= 1;
6381 inst
.operands
[i
].isvec
= 1;
6382 inst
.operands
[i
].issingle
= 1;
6383 inst
.operands
[i
].vectype
= optype
;
6384 inst
.operands
[i
].present
= 1;
6387 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6391 inst
.operands
[i
].reg
= val
;
6392 inst
.operands
[i
].isreg
= 1;
6393 inst
.operands
[i
].isvec
= 1;
6394 inst
.operands
[i
].issingle
= 1;
6395 inst
.operands
[i
].vectype
= optype
;
6396 inst
.operands
[i
].present
= 1;
6401 first_error (_("parse error"));
6405 /* Successfully parsed the operands. Update args. */
6411 first_error (_("expected comma"));
6415 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
6419 /* Use this macro when the operand constraints are different
6420 for ARM and THUMB (e.g. ldrd). */
6421 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6422 ((arm_operand) | ((thumb_operand) << 16))
6424 /* Matcher codes for parse_operands. */
6425 enum operand_parse_code
6427 OP_stop
, /* end of line */
6429 OP_RR
, /* ARM register */
6430 OP_RRnpc
, /* ARM register, not r15 */
6431 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6432 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6433 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6434 optional trailing ! */
6435 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6436 OP_RCP
, /* Coprocessor number */
6437 OP_RCN
, /* Coprocessor register */
6438 OP_RF
, /* FPA register */
6439 OP_RVS
, /* VFP single precision register */
6440 OP_RVD
, /* VFP double precision register (0..15) */
6441 OP_RND
, /* Neon double precision register (0..31) */
6442 OP_RNQ
, /* Neon quad precision register */
6443 OP_RVSD
, /* VFP single or double precision register */
6444 OP_RNDQ
, /* Neon double or quad precision register */
6445 OP_RNSDQ
, /* Neon single, double or quad precision register */
6446 OP_RNSC
, /* Neon scalar D[X] */
6447 OP_RVC
, /* VFP control register */
6448 OP_RMF
, /* Maverick F register */
6449 OP_RMD
, /* Maverick D register */
6450 OP_RMFX
, /* Maverick FX register */
6451 OP_RMDX
, /* Maverick DX register */
6452 OP_RMAX
, /* Maverick AX register */
6453 OP_RMDS
, /* Maverick DSPSC register */
6454 OP_RIWR
, /* iWMMXt wR register */
6455 OP_RIWC
, /* iWMMXt wC register */
6456 OP_RIWG
, /* iWMMXt wCG register */
6457 OP_RXA
, /* XScale accumulator register */
6459 OP_REGLST
, /* ARM register list */
6460 OP_VRSLST
, /* VFP single-precision register list */
6461 OP_VRDLST
, /* VFP double-precision register list */
6462 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6463 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6464 OP_NSTRLST
, /* Neon element/structure list */
6466 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6467 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6468 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6469 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6470 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6471 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6472 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6473 OP_VMOV
, /* Neon VMOV operands. */
6474 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6475 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6476 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6478 OP_I0
, /* immediate zero */
6479 OP_I7
, /* immediate value 0 .. 7 */
6480 OP_I15
, /* 0 .. 15 */
6481 OP_I16
, /* 1 .. 16 */
6482 OP_I16z
, /* 0 .. 16 */
6483 OP_I31
, /* 0 .. 31 */
6484 OP_I31w
, /* 0 .. 31, optional trailing ! */
6485 OP_I32
, /* 1 .. 32 */
6486 OP_I32z
, /* 0 .. 32 */
6487 OP_I63
, /* 0 .. 63 */
6488 OP_I63s
, /* -64 .. 63 */
6489 OP_I64
, /* 1 .. 64 */
6490 OP_I64z
, /* 0 .. 64 */
6491 OP_I255
, /* 0 .. 255 */
6493 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6494 OP_I7b
, /* 0 .. 7 */
6495 OP_I15b
, /* 0 .. 15 */
6496 OP_I31b
, /* 0 .. 31 */
6498 OP_SH
, /* shifter operand */
6499 OP_SHG
, /* shifter operand with possible group relocation */
6500 OP_ADDR
, /* Memory address expression (any mode) */
6501 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6502 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6503 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6504 OP_EXP
, /* arbitrary expression */
6505 OP_EXPi
, /* same, with optional immediate prefix */
6506 OP_EXPr
, /* same, with optional relocation suffix */
6507 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6509 OP_CPSF
, /* CPS flags */
6510 OP_ENDI
, /* Endianness specifier */
6511 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6512 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6513 OP_COND
, /* conditional code */
6514 OP_TB
, /* Table branch. */
6516 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6518 OP_RRnpc_I0
, /* ARM register or literal 0 */
6519 OP_RR_EXr
, /* ARM register or expression with opt. reloc suff. */
6520 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6521 OP_RF_IF
, /* FPA register or immediate */
6522 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6523 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6525 /* Optional operands. */
6526 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6527 OP_oI31b
, /* 0 .. 31 */
6528 OP_oI32b
, /* 1 .. 32 */
6529 OP_oI32z
, /* 0 .. 32 */
6530 OP_oIffffb
, /* 0 .. 65535 */
6531 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6533 OP_oRR
, /* ARM register */
6534 OP_oRRnpc
, /* ARM register, not the PC */
6535 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6536 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6537 OP_oRND
, /* Optional Neon double precision register */
6538 OP_oRNQ
, /* Optional Neon quad precision register */
6539 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6540 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6541 OP_oSHll
, /* LSL immediate */
6542 OP_oSHar
, /* ASR immediate */
6543 OP_oSHllar
, /* LSL or ASR immediate */
6544 OP_oROR
, /* ROR 0/8/16/24 */
6545 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6547 /* Some pre-defined mixed (ARM/THUMB) operands. */
6548 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6549 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6550 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6552 OP_FIRST_OPTIONAL
= OP_oI7b
6555 /* Generic instruction operand parser. This does no encoding and no
6556 semantic validation; it merely squirrels values away in the inst
6557 structure. Returns SUCCESS or FAIL depending on whether the
6558 specified grammar matched. */
6560 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6562 unsigned const int *upat
= pattern
;
6563 char *backtrack_pos
= 0;
6564 const char *backtrack_error
= 0;
6565 int i
, val
= 0, backtrack_index
= 0;
6566 enum arm_reg_type rtype
;
6567 parse_operand_result result
;
6568 unsigned int op_parse_code
;
6570 #define po_char_or_fail(chr) \
6573 if (skip_past_char (&str, chr) == FAIL) \
6578 #define po_reg_or_fail(regtype) \
6581 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6582 & inst.operands[i].vectype); \
6585 first_error (_(reg_expected_msgs[regtype])); \
6588 inst.operands[i].reg = val; \
6589 inst.operands[i].isreg = 1; \
6590 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6591 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6592 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6593 || rtype == REG_TYPE_VFD \
6594 || rtype == REG_TYPE_NQ); \
6598 #define po_reg_or_goto(regtype, label) \
6601 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6602 & inst.operands[i].vectype); \
6606 inst.operands[i].reg = val; \
6607 inst.operands[i].isreg = 1; \
6608 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6609 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6610 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6611 || rtype == REG_TYPE_VFD \
6612 || rtype == REG_TYPE_NQ); \
6616 #define po_imm_or_fail(min, max, popt) \
6619 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6621 inst.operands[i].imm = val; \
6625 #define po_scalar_or_goto(elsz, label) \
6628 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6631 inst.operands[i].reg = val; \
6632 inst.operands[i].isscalar = 1; \
6636 #define po_misc_or_fail(expr) \
6644 #define po_misc_or_fail_no_backtrack(expr) \
6648 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6649 backtrack_pos = 0; \
6650 if (result != PARSE_OPERAND_SUCCESS) \
6655 #define po_barrier_or_imm(str) \
6658 val = parse_barrier (&str); \
6659 if (val == FAIL && ! ISALPHA (*str)) \
6662 /* ISB can only take SY as an option. */ \
6663 || ((inst.instruction & 0xf0) == 0x60 \
6666 inst.error = _("invalid barrier type"); \
6667 backtrack_pos = 0; \
6673 skip_whitespace (str
);
6675 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6677 op_parse_code
= upat
[i
];
6678 if (op_parse_code
>= 1<<16)
6679 op_parse_code
= thumb
? (op_parse_code
>> 16)
6680 : (op_parse_code
& ((1<<16)-1));
6682 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6684 /* Remember where we are in case we need to backtrack. */
6685 gas_assert (!backtrack_pos
);
6686 backtrack_pos
= str
;
6687 backtrack_error
= inst
.error
;
6688 backtrack_index
= i
;
6691 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6692 po_char_or_fail (',');
6694 switch (op_parse_code
)
6702 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6703 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6704 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6705 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6706 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6707 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6709 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6711 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6713 /* Also accept generic coprocessor regs for unknown registers. */
6715 po_reg_or_fail (REG_TYPE_CN
);
6717 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6718 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6719 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6720 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6721 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6722 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6723 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6724 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6725 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6726 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6728 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6730 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6731 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6733 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6735 /* Neon scalar. Using an element size of 8 means that some invalid
6736 scalars are accepted here, so deal with those in later code. */
6737 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6741 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6744 po_imm_or_fail (0, 0, TRUE
);
6749 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6754 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6757 if (parse_ifimm_zero (&str
))
6758 inst
.operands
[i
].imm
= 0;
6762 = _("only floating point zero is allowed as immediate value");
6770 po_scalar_or_goto (8, try_rr
);
6773 po_reg_or_fail (REG_TYPE_RN
);
6779 po_scalar_or_goto (8, try_nsdq
);
6782 po_reg_or_fail (REG_TYPE_NSDQ
);
6788 po_scalar_or_goto (8, try_ndq
);
6791 po_reg_or_fail (REG_TYPE_NDQ
);
6797 po_scalar_or_goto (8, try_vfd
);
6800 po_reg_or_fail (REG_TYPE_VFD
);
6805 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6806 not careful then bad things might happen. */
6807 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6812 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6815 /* There's a possibility of getting a 64-bit immediate here, so
6816 we need special handling. */
6817 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6820 inst
.error
= _("immediate value is out of range");
6828 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6831 po_imm_or_fail (0, 63, TRUE
);
6836 po_char_or_fail ('[');
6837 po_reg_or_fail (REG_TYPE_RN
);
6838 po_char_or_fail (']');
6844 po_reg_or_fail (REG_TYPE_RN
);
6845 if (skip_past_char (&str
, '!') == SUCCESS
)
6846 inst
.operands
[i
].writeback
= 1;
6850 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6851 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6852 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6853 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6854 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6855 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6856 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6857 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6858 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6859 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6860 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6861 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6863 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6865 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6866 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6868 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6869 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6870 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
6871 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6873 /* Immediate variants */
6875 po_char_or_fail ('{');
6876 po_imm_or_fail (0, 255, TRUE
);
6877 po_char_or_fail ('}');
6881 /* The expression parser chokes on a trailing !, so we have
6882 to find it first and zap it. */
6885 while (*s
&& *s
!= ',')
6890 inst
.operands
[i
].writeback
= 1;
6892 po_imm_or_fail (0, 31, TRUE
);
6900 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6905 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6910 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6912 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6914 val
= parse_reloc (&str
);
6917 inst
.error
= _("unrecognized relocation suffix");
6920 else if (val
!= BFD_RELOC_UNUSED
)
6922 inst
.operands
[i
].imm
= val
;
6923 inst
.operands
[i
].hasreloc
= 1;
6928 /* Operand for MOVW or MOVT. */
6930 po_misc_or_fail (parse_half (&str
));
6933 /* Register or expression. */
6934 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6935 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6937 /* Register or immediate. */
6938 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6939 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6941 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6943 if (!is_immediate_prefix (*str
))
6946 val
= parse_fpa_immediate (&str
);
6949 /* FPA immediates are encoded as registers 8-15.
6950 parse_fpa_immediate has already applied the offset. */
6951 inst
.operands
[i
].reg
= val
;
6952 inst
.operands
[i
].isreg
= 1;
6955 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6956 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6958 /* Two kinds of register. */
6961 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6963 || (rege
->type
!= REG_TYPE_MMXWR
6964 && rege
->type
!= REG_TYPE_MMXWC
6965 && rege
->type
!= REG_TYPE_MMXWCG
))
6967 inst
.error
= _("iWMMXt data or control register expected");
6970 inst
.operands
[i
].reg
= rege
->number
;
6971 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
6977 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6979 || (rege
->type
!= REG_TYPE_MMXWC
6980 && rege
->type
!= REG_TYPE_MMXWCG
))
6982 inst
.error
= _("iWMMXt control register expected");
6985 inst
.operands
[i
].reg
= rege
->number
;
6986 inst
.operands
[i
].isreg
= 1;
6991 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
6992 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
6993 case OP_oROR
: val
= parse_ror (&str
); break;
6994 case OP_COND
: val
= parse_cond (&str
); break;
6995 case OP_oBARRIER_I15
:
6996 po_barrier_or_imm (str
); break;
6998 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7004 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7005 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7007 inst
.error
= _("Banked registers are not available with this "
7013 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7017 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7020 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7022 if (strncasecmp (str
, "APSR_", 5) == 0)
7029 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7030 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7031 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7032 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7033 default: found
= 16;
7037 inst
.operands
[i
].isvec
= 1;
7038 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7039 inst
.operands
[i
].reg
= REG_PC
;
7046 po_misc_or_fail (parse_tb (&str
));
7049 /* Register lists. */
7051 val
= parse_reg_list (&str
);
7054 inst
.operands
[i
].writeback
= 1;
7060 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7064 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7068 /* Allow Q registers too. */
7069 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7074 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7076 inst
.operands
[i
].issingle
= 1;
7081 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7086 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7087 &inst
.operands
[i
].vectype
);
7090 /* Addressing modes */
7092 po_misc_or_fail (parse_address (&str
, i
));
7096 po_misc_or_fail_no_backtrack (
7097 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7101 po_misc_or_fail_no_backtrack (
7102 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7106 po_misc_or_fail_no_backtrack (
7107 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7111 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7115 po_misc_or_fail_no_backtrack (
7116 parse_shifter_operand_group_reloc (&str
, i
));
7120 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7124 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7128 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7132 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7135 /* Various value-based sanity checks and shared operations. We
7136 do not signal immediate failures for the register constraints;
7137 this allows a syntax error to take precedence. */
7138 switch (op_parse_code
)
7146 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7147 inst
.error
= BAD_PC
;
7152 if (inst
.operands
[i
].isreg
)
7154 if (inst
.operands
[i
].reg
== REG_PC
)
7155 inst
.error
= BAD_PC
;
7156 else if (inst
.operands
[i
].reg
== REG_SP
)
7157 inst
.error
= BAD_SP
;
7162 if (inst
.operands
[i
].isreg
7163 && inst
.operands
[i
].reg
== REG_PC
7164 && (inst
.operands
[i
].writeback
|| thumb
))
7165 inst
.error
= BAD_PC
;
7174 case OP_oBARRIER_I15
:
7183 inst
.operands
[i
].imm
= val
;
7190 /* If we get here, this operand was successfully parsed. */
7191 inst
.operands
[i
].present
= 1;
7195 inst
.error
= BAD_ARGS
;
7200 /* The parse routine should already have set inst.error, but set a
7201 default here just in case. */
7203 inst
.error
= _("syntax error");
7207 /* Do not backtrack over a trailing optional argument that
7208 absorbed some text. We will only fail again, with the
7209 'garbage following instruction' error message, which is
7210 probably less helpful than the current one. */
7211 if (backtrack_index
== i
&& backtrack_pos
!= str
7212 && upat
[i
+1] == OP_stop
)
7215 inst
.error
= _("syntax error");
7219 /* Try again, skipping the optional argument at backtrack_pos. */
7220 str
= backtrack_pos
;
7221 inst
.error
= backtrack_error
;
7222 inst
.operands
[backtrack_index
].present
= 0;
7223 i
= backtrack_index
;
7227 /* Check that we have parsed all the arguments. */
7228 if (*str
!= '\0' && !inst
.error
)
7229 inst
.error
= _("garbage following instruction");
7231 return inst
.error
? FAIL
: SUCCESS
;
7234 #undef po_char_or_fail
7235 #undef po_reg_or_fail
7236 #undef po_reg_or_goto
7237 #undef po_imm_or_fail
7238 #undef po_scalar_or_fail
7239 #undef po_barrier_or_imm
7241 /* Shorthand macro for instruction encoding functions issuing errors. */
7242 #define constraint(expr, err) \
7253 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7254 instructions are unpredictable if these registers are used. This
7255 is the BadReg predicate in ARM's Thumb-2 documentation. */
7256 #define reject_bad_reg(reg) \
7258 if (reg == REG_SP || reg == REG_PC) \
7260 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
7265 /* If REG is R13 (the stack pointer), warn that its use is
7267 #define warn_deprecated_sp(reg) \
7269 if (warn_on_deprecated && reg == REG_SP) \
7270 as_tsktsk (_("use of r13 is deprecated")); \
7273 /* Functions for operand encoding. ARM, then Thumb. */
7275 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7277 /* If VAL can be encoded in the immediate field of an ARM instruction,
7278 return the encoded form. Otherwise, return FAIL. */
7281 encode_arm_immediate (unsigned int val
)
7285 for (i
= 0; i
< 32; i
+= 2)
7286 if ((a
= rotate_left (val
, i
)) <= 0xff)
7287 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7292 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7293 return the encoded form. Otherwise, return FAIL. */
7295 encode_thumb32_immediate (unsigned int val
)
7302 for (i
= 1; i
<= 24; i
++)
7305 if ((val
& ~(0xff << i
)) == 0)
7306 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7310 if (val
== ((a
<< 16) | a
))
7312 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7316 if (val
== ((a
<< 16) | a
))
7317 return 0x200 | (a
>> 8);
7321 /* Encode a VFP SP or DP register number into inst.instruction. */
7324 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7326 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7329 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7332 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7335 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7340 first_error (_("D register out of range for selected VFP version"));
7348 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7352 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7356 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7360 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7364 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7368 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7376 /* Encode a <shift> in an ARM-format instruction. The immediate,
7377 if any, is handled by md_apply_fix. */
7379 encode_arm_shift (int i
)
7381 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7382 inst
.instruction
|= SHIFT_ROR
<< 5;
7385 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7386 if (inst
.operands
[i
].immisreg
)
7388 inst
.instruction
|= SHIFT_BY_REG
;
7389 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7392 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7397 encode_arm_shifter_operand (int i
)
7399 if (inst
.operands
[i
].isreg
)
7401 inst
.instruction
|= inst
.operands
[i
].reg
;
7402 encode_arm_shift (i
);
7406 inst
.instruction
|= INST_IMMEDIATE
;
7407 if (inst
.reloc
.type
!= BFD_RELOC_ARM_IMMEDIATE
)
7408 inst
.instruction
|= inst
.operands
[i
].imm
;
7412 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7414 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7417 Generate an error if the operand is not a register. */
7418 constraint (!inst
.operands
[i
].isreg
,
7419 _("Instruction does not support =N addresses"));
7421 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7423 if (inst
.operands
[i
].preind
)
7427 inst
.error
= _("instruction does not accept preindexed addressing");
7430 inst
.instruction
|= PRE_INDEX
;
7431 if (inst
.operands
[i
].writeback
)
7432 inst
.instruction
|= WRITE_BACK
;
7435 else if (inst
.operands
[i
].postind
)
7437 gas_assert (inst
.operands
[i
].writeback
);
7439 inst
.instruction
|= WRITE_BACK
;
7441 else /* unindexed - only for coprocessor */
7443 inst
.error
= _("instruction does not accept unindexed addressing");
7447 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7448 && (((inst
.instruction
& 0x000f0000) >> 16)
7449 == ((inst
.instruction
& 0x0000f000) >> 12)))
7450 as_warn ((inst
.instruction
& LOAD_BIT
)
7451 ? _("destination register same as write-back base")
7452 : _("source register same as write-back base"));
7455 /* inst.operands[i] was set up by parse_address. Encode it into an
7456 ARM-format mode 2 load or store instruction. If is_t is true,
7457 reject forms that cannot be used with a T instruction (i.e. not
7460 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7462 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7464 encode_arm_addr_mode_common (i
, is_t
);
7466 if (inst
.operands
[i
].immisreg
)
7468 constraint ((inst
.operands
[i
].imm
== REG_PC
7469 || (is_pc
&& inst
.operands
[i
].writeback
)),
7471 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7472 inst
.instruction
|= inst
.operands
[i
].imm
;
7473 if (!inst
.operands
[i
].negative
)
7474 inst
.instruction
|= INDEX_UP
;
7475 if (inst
.operands
[i
].shifted
)
7477 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7478 inst
.instruction
|= SHIFT_ROR
<< 5;
7481 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7482 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7486 else /* immediate offset in inst.reloc */
7488 if (is_pc
&& !inst
.reloc
.pc_rel
)
7490 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7492 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7493 cannot use PC in addressing.
7494 PC cannot be used in writeback addressing, either. */
7495 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7498 /* Use of PC in str is deprecated for ARMv7. */
7499 if (warn_on_deprecated
7501 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7502 as_tsktsk (_("use of PC in this instruction is deprecated"));
7505 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7507 /* Prefer + for zero encoded value. */
7508 if (!inst
.operands
[i
].negative
)
7509 inst
.instruction
|= INDEX_UP
;
7510 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
7515 /* inst.operands[i] was set up by parse_address. Encode it into an
7516 ARM-format mode 3 load or store instruction. Reject forms that
7517 cannot be used with such instructions. If is_t is true, reject
7518 forms that cannot be used with a T instruction (i.e. not
7521 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7523 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7525 inst
.error
= _("instruction does not accept scaled register index");
7529 encode_arm_addr_mode_common (i
, is_t
);
7531 if (inst
.operands
[i
].immisreg
)
7533 constraint ((inst
.operands
[i
].imm
== REG_PC
7534 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7536 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7538 inst
.instruction
|= inst
.operands
[i
].imm
;
7539 if (!inst
.operands
[i
].negative
)
7540 inst
.instruction
|= INDEX_UP
;
7542 else /* immediate offset in inst.reloc */
7544 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
7545 && inst
.operands
[i
].writeback
),
7547 inst
.instruction
|= HWOFFSET_IMM
;
7548 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7550 /* Prefer + for zero encoded value. */
7551 if (!inst
.operands
[i
].negative
)
7552 inst
.instruction
|= INDEX_UP
;
7554 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7559 /* Write immediate bits [7:0] to the following locations:
7561 |28/24|23 19|18 16|15 4|3 0|
7562 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7564 This function is used by VMOV/VMVN/VORR/VBIC. */
7567 neon_write_immbits (unsigned immbits
)
7569 inst
.instruction
|= immbits
& 0xf;
7570 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7571 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
7574 /* Invert low-order SIZE bits of XHI:XLO. */
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* Fall through: the low word is inverted for 64 bits too.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7611 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD: each byte
   is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
7623 /* For immediate of above form, return 0bABCD. */
/* For immediate of above form, return 0bABCD: bit 0 of each byte,
   collected into a 4-bit value (byte 0 -> bit 0, ... byte 3 -> bit 3).  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
7632 /* Compress quarter-float representation to 0b...000 abcdefgh. */
/* Compress quarter-float representation to 0b...000 abcdefgh: sign bit
   plus the exponent low bit and top six mantissa bits of an IEEE-754
   single-precision pattern.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return low7 | sign;
}
7640 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7641 the instruction. *OP is passed as the initial value of the op field, and
7642 may be set to a different value depending on the constant (i.e.
7643 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7644 MVN). If the immediate looks like a repeated pattern then also
7645 try smaller element sizes. */
7648 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7649 unsigned *immbits
, int *op
, int size
,
7650 enum neon_el_type type
)
7652 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7654 if (type
== NT_float
&& !float_p
)
7657 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7659 if (size
!= 32 || *op
== 1)
7661 *immbits
= neon_qfloat_bits (immlo
);
7667 if (neon_bits_same_in_bytes (immhi
)
7668 && neon_bits_same_in_bytes (immlo
))
7672 *immbits
= (neon_squash_bits (immhi
) << 4)
7673 | neon_squash_bits (immlo
);
7684 if (immlo
== (immlo
& 0x000000ff))
7689 else if (immlo
== (immlo
& 0x0000ff00))
7691 *immbits
= immlo
>> 8;
7694 else if (immlo
== (immlo
& 0x00ff0000))
7696 *immbits
= immlo
>> 16;
7699 else if (immlo
== (immlo
& 0xff000000))
7701 *immbits
= immlo
>> 24;
7704 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7706 *immbits
= (immlo
>> 8) & 0xff;
7709 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7711 *immbits
= (immlo
>> 16) & 0xff;
7715 if ((immlo
& 0xffff) != (immlo
>> 16))
7722 if (immlo
== (immlo
& 0x000000ff))
7727 else if (immlo
== (immlo
& 0x0000ff00))
7729 *immbits
= immlo
>> 8;
7733 if ((immlo
& 0xff) != (immlo
>> 8))
7738 if (immlo
== (immlo
& 0x000000ff))
7740 /* Don't allow MVN with 8-bit immediate. */
7757 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7758 Determine whether it can be performed with a move instruction; if
7759 it can, convert inst.instruction to that move instruction and
7760 return TRUE; if it can't, convert inst.instruction to a literal-pool
7761 load and return FALSE. If this is not a valid thing to do in the
7762 current context, set inst.error and return TRUE.
7764 inst.operands[i] describes the destination register. */
7767 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
7770 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
7771 bfd_boolean arm_p
= (t
== CONST_ARM
);
7772 bfd_boolean vec64_p
= (t
== CONST_VEC
) && !inst
.operands
[i
].issingle
;
7775 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
7779 if ((inst
.instruction
& tbit
) == 0)
7781 inst
.error
= _("invalid pseudo operation");
7784 if (inst
.reloc
.exp
.X_op
!= O_constant
7785 && inst
.reloc
.exp
.X_op
!= O_symbol
7786 && inst
.reloc
.exp
.X_op
!= O_big
)
7788 inst
.error
= _("constant expression expected");
7791 if ((inst
.reloc
.exp
.X_op
== O_constant
7792 || inst
.reloc
.exp
.X_op
== O_big
)
7793 && !inst
.operands
[i
].issingle
)
7795 if (thumb_p
&& inst
.reloc
.exp
.X_op
== O_constant
)
7797 if (!unified_syntax
&& (inst
.reloc
.exp
.X_add_number
& ~0xFF) == 0)
7799 /* This can be done with a mov(1) instruction. */
7800 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
7801 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
;
7805 else if (arm_p
&& inst
.reloc
.exp
.X_op
== O_constant
)
7807 int value
= encode_arm_immediate (inst
.reloc
.exp
.X_add_number
);
7810 /* This can be done with a mov instruction. */
7811 inst
.instruction
&= LITERAL_MASK
;
7812 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
7813 inst
.instruction
|= value
& 0xfff;
7817 value
= encode_arm_immediate (~inst
.reloc
.exp
.X_add_number
);
7820 /* This can be done with a mvn instruction. */
7821 inst
.instruction
&= LITERAL_MASK
;
7822 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
7823 inst
.instruction
|= value
& 0xfff;
7830 unsigned immbits
= 0;
7831 unsigned immlo
= inst
.operands
[1].imm
;
7832 unsigned immhi
= inst
.operands
[1].regisimm
7833 ? inst
.operands
[1].reg
7834 : inst
.reloc
.exp
.X_unsigned
7836 : ((bfd_int64_t
)((int) immlo
)) >> 32;
7837 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7838 &op
, 64, NT_invtype
);
7842 neon_invert_size (&immlo
, &immhi
, 64);
7844 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7845 &op
, 64, NT_invtype
);
7849 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
7854 /* Fill other bits in vmov encoding for both thumb and arm. */
7856 inst
.instruction
|= (0x7 << 29) | (0xF << 24);
7858 inst
.instruction
|= (0xF << 28) | (0x1 << 25);
7859 neon_write_immbits (immbits
);
7865 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
7866 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
7869 inst
.operands
[1].reg
= REG_PC
;
7870 inst
.operands
[1].isreg
= 1;
7871 inst
.operands
[1].preind
= 1;
7872 inst
.reloc
.pc_rel
= 1;
7873 inst
.reloc
.type
= (thumb_p
7874 ? BFD_RELOC_ARM_THUMB_OFFSET
7876 ? BFD_RELOC_ARM_HWLITERAL
7877 : BFD_RELOC_ARM_LITERAL
));
7881 /* inst.operands[i] was set up by parse_address. Encode it into an
7882 ARM-format instruction. Reject all forms which cannot be encoded
7883 into a coprocessor load/store instruction. If wb_ok is false,
7884 reject use of writeback; if unind_ok is false, reject use of
7885 unindexed addressing. If reloc_override is not 0, use it instead
7886 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
7887 (in which case it is preserved). */
7890 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
7892 if (!inst
.operands
[i
].isreg
)
7895 if (! inst
.operands
[0].isvec
)
7897 inst
.error
= _("invalid co-processor operand");
7900 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
7904 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7906 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
7908 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
7910 gas_assert (!inst
.operands
[i
].writeback
);
7913 inst
.error
= _("instruction does not support unindexed addressing");
7916 inst
.instruction
|= inst
.operands
[i
].imm
;
7917 inst
.instruction
|= INDEX_UP
;
7921 if (inst
.operands
[i
].preind
)
7922 inst
.instruction
|= PRE_INDEX
;
7924 if (inst
.operands
[i
].writeback
)
7926 if (inst
.operands
[i
].reg
== REG_PC
)
7928 inst
.error
= _("pc may not be used with write-back");
7933 inst
.error
= _("instruction does not support writeback");
7936 inst
.instruction
|= WRITE_BACK
;
7940 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
7941 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
7942 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
7943 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
7946 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
7948 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
7951 /* Prefer + for zero encoded value. */
7952 if (!inst
.operands
[i
].negative
)
7953 inst
.instruction
|= INDEX_UP
;
7958 /* Functions for instruction encoding, sorted by sub-architecture.
7959 First some generics; their names are taken from the conventional
7960 bit positions for register arguments in ARM format instructions. */
7970 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7976 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7977 inst
.instruction
|= inst
.operands
[1].reg
;
7983 inst
.instruction
|= inst
.operands
[0].reg
;
7984 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7990 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7991 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7997 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7998 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8002 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8004 if (ARM_CPU_IS_ANY (cpu_variant
))
8006 as_tsktsk ("%s", msg
);
8009 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8021 unsigned Rn
= inst
.operands
[2].reg
;
8022 /* Enforce restrictions on SWP instruction. */
8023 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8025 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8026 _("Rn must not overlap other operands"));
8028 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8030 if (!check_obsolete (&arm_ext_v8
,
8031 _("swp{b} use is obsoleted for ARMv8 and later"))
8032 && warn_on_deprecated
8033 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8034 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8037 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8038 inst
.instruction
|= inst
.operands
[1].reg
;
8039 inst
.instruction
|= Rn
<< 16;
8045 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8046 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8047 inst
.instruction
|= inst
.operands
[2].reg
;
8053 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8054 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
8055 && inst
.reloc
.exp
.X_op
!= O_illegal
)
8056 || inst
.reloc
.exp
.X_add_number
!= 0),
8058 inst
.instruction
|= inst
.operands
[0].reg
;
8059 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8060 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8066 inst
.instruction
|= inst
.operands
[0].imm
;
8072 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8073 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8076 /* ARM instructions, in alphabetical order by function name (except
8077 that wrapper functions appear immediately after the function they
8080 /* This is a pseudo-op of the form "adr rd, label" to be converted
8081 into a relative address of the form "add rd, pc, #label-.-8". */
8086 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8088 /* Frag hacking will turn this into a sub instruction if the offset turns
8089 out to be negative. */
8090 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8091 inst
.reloc
.pc_rel
= 1;
8092 inst
.reloc
.exp
.X_add_number
-= 8;
8095 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8096 into a relative address of the form:
8097 add rd, pc, #low(label-.-8)"
8098 add rd, rd, #high(label-.-8)" */
8103 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8105 /* Frag hacking will turn this into a sub instruction if the offset turns
8106 out to be negative. */
8107 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8108 inst
.reloc
.pc_rel
= 1;
8109 inst
.size
= INSN_SIZE
* 2;
8110 inst
.reloc
.exp
.X_add_number
-= 8;
8116 if (!inst
.operands
[1].present
)
8117 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8118 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8119 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8120 encode_arm_shifter_operand (2);
8126 if (inst
.operands
[0].present
)
8127 inst
.instruction
|= inst
.operands
[0].imm
;
8129 inst
.instruction
|= 0xf;
8135 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8136 constraint (msb
> 32, _("bit-field extends past end of register"));
8137 /* The instruction encoding stores the LSB and MSB,
8138 not the LSB and width. */
8139 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8140 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8141 inst
.instruction
|= (msb
- 1) << 16;
8149 /* #0 in second position is alternative syntax for bfc, which is
8150 the same instruction but with REG_PC in the Rm field. */
8151 if (!inst
.operands
[1].isreg
)
8152 inst
.operands
[1].reg
= REG_PC
;
8154 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8155 constraint (msb
> 32, _("bit-field extends past end of register"));
8156 /* The instruction encoding stores the LSB and MSB,
8157 not the LSB and width. */
8158 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8159 inst
.instruction
|= inst
.operands
[1].reg
;
8160 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8161 inst
.instruction
|= (msb
- 1) << 16;
8167 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8168 _("bit-field extends past end of register"));
8169 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8170 inst
.instruction
|= inst
.operands
[1].reg
;
8171 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8172 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8175 /* ARM V5 breakpoint instruction (argument parse)
8176 BKPT <16 bit unsigned immediate>
8177 Instruction is not conditional.
8178 The bit pattern given in insns[] has the COND_ALWAYS condition,
8179 and it is an error if the caller tried to override that. */
8184 /* Top 12 of 16 bits to bits 19:8. */
8185 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8187 /* Bottom 4 of 16 bits to bits 3:0. */
8188 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8192 encode_branch (int default_reloc
)
8194 if (inst
.operands
[0].hasreloc
)
8196 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8197 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8198 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8199 inst
.reloc
.type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8200 ? BFD_RELOC_ARM_PLT32
8201 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8204 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
8205 inst
.reloc
.pc_rel
= 1;
8212 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8213 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8216 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8223 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8225 if (inst
.cond
== COND_ALWAYS
)
8226 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8228 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8232 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8235 /* ARM V5 branch-link-exchange instruction (argument parse)
8236 BLX <target_addr> ie BLX(1)
8237 BLX{<condition>} <Rm> ie BLX(2)
8238 Unfortunately, there are two different opcodes for this mnemonic.
8239 So, the insns[].value is not used, and the code here zaps values
8240 into inst.instruction.
8241 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8246 if (inst
.operands
[0].isreg
)
8248 /* Arg is a register; the opcode provided by insns[] is correct.
8249 It is not illegal to do "blx pc", just useless. */
8250 if (inst
.operands
[0].reg
== REG_PC
)
8251 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8253 inst
.instruction
|= inst
.operands
[0].reg
;
8257 /* Arg is an address; this instruction cannot be executed
8258 conditionally, and the opcode must be adjusted.
8259 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8260 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8261 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8262 inst
.instruction
= 0xfa000000;
8263 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8270 bfd_boolean want_reloc
;
8272 if (inst
.operands
[0].reg
== REG_PC
)
8273 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8275 inst
.instruction
|= inst
.operands
[0].reg
;
8276 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8277 it is for ARMv4t or earlier. */
8278 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8279 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
8283 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8288 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
8292 /* ARM v5TEJ. Jump to Jazelle code. */
8297 if (inst
.operands
[0].reg
== REG_PC
)
8298 as_tsktsk (_("use of r15 in bxj is not really useful"));
8300 inst
.instruction
|= inst
.operands
[0].reg
;
8303 /* Co-processor data operation:
8304 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8305 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8309 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8310 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8311 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8312 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8313 inst
.instruction
|= inst
.operands
[4].reg
;
8314 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8320 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8321 encode_arm_shifter_operand (1);
8324 /* Transfer between coprocessor and ARM registers.
8325 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8330 No special properties. */
8332 struct deprecated_coproc_regs_s
8339 arm_feature_set deprecated
;
8340 arm_feature_set obsoleted
;
8341 const char *dep_msg
;
8342 const char *obs_msg
;
8345 #define DEPR_ACCESS_V8 \
8346 N_("This coprocessor register access is deprecated in ARMv8")
8348 /* Table of all deprecated coprocessor registers. */
8349 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8351 {15, 0, 7, 10, 5, /* CP15DMB. */
8352 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8353 DEPR_ACCESS_V8
, NULL
},
8354 {15, 0, 7, 10, 4, /* CP15DSB. */
8355 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8356 DEPR_ACCESS_V8
, NULL
},
8357 {15, 0, 7, 5, 4, /* CP15ISB. */
8358 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8359 DEPR_ACCESS_V8
, NULL
},
8360 {14, 6, 1, 0, 0, /* TEEHBR. */
8361 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8362 DEPR_ACCESS_V8
, NULL
},
8363 {14, 6, 0, 0, 0, /* TEECR. */
8364 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8365 DEPR_ACCESS_V8
, NULL
},
8368 #undef DEPR_ACCESS_V8
8370 static const size_t deprecated_coproc_reg_count
=
8371 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8379 Rd
= inst
.operands
[2].reg
;
8382 if (inst
.instruction
== 0xee000010
8383 || inst
.instruction
== 0xfe000010)
8385 reject_bad_reg (Rd
);
8388 constraint (Rd
== REG_SP
, BAD_SP
);
8393 if (inst
.instruction
== 0xe000010)
8394 constraint (Rd
== REG_PC
, BAD_PC
);
8397 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8399 const struct deprecated_coproc_regs_s
*r
=
8400 deprecated_coproc_regs
+ i
;
8402 if (inst
.operands
[0].reg
== r
->cp
8403 && inst
.operands
[1].imm
== r
->opc1
8404 && inst
.operands
[3].reg
== r
->crn
8405 && inst
.operands
[4].reg
== r
->crm
8406 && inst
.operands
[5].imm
== r
->opc2
)
8408 if (! ARM_CPU_IS_ANY (cpu_variant
)
8409 && warn_on_deprecated
8410 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8411 as_tsktsk ("%s", r
->dep_msg
);
8415 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8416 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8417 inst
.instruction
|= Rd
<< 12;
8418 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8419 inst
.instruction
|= inst
.operands
[4].reg
;
8420 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8423 /* Transfer between coprocessor register and pair of ARM registers.
8424 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8429 Two XScale instructions are special cases of these:
8431 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8432 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8434 Result unpredictable if Rd or Rn is R15. */
8441 Rd
= inst
.operands
[2].reg
;
8442 Rn
= inst
.operands
[3].reg
;
8446 reject_bad_reg (Rd
);
8447 reject_bad_reg (Rn
);
8451 constraint (Rd
== REG_PC
, BAD_PC
);
8452 constraint (Rn
== REG_PC
, BAD_PC
);
8455 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8456 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
8457 inst
.instruction
|= Rd
<< 12;
8458 inst
.instruction
|= Rn
<< 16;
8459 inst
.instruction
|= inst
.operands
[4].reg
;
8465 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8466 if (inst
.operands
[1].present
)
8468 inst
.instruction
|= CPSI_MMOD
;
8469 inst
.instruction
|= inst
.operands
[1].imm
;
8476 inst
.instruction
|= inst
.operands
[0].imm
;
8482 unsigned Rd
, Rn
, Rm
;
8484 Rd
= inst
.operands
[0].reg
;
8485 Rn
= (inst
.operands
[1].present
8486 ? inst
.operands
[1].reg
: Rd
);
8487 Rm
= inst
.operands
[2].reg
;
8489 constraint ((Rd
== REG_PC
), BAD_PC
);
8490 constraint ((Rn
== REG_PC
), BAD_PC
);
8491 constraint ((Rm
== REG_PC
), BAD_PC
);
8493 inst
.instruction
|= Rd
<< 16;
8494 inst
.instruction
|= Rn
<< 0;
8495 inst
.instruction
|= Rm
<< 8;
8501 /* There is no IT instruction in ARM mode. We
8502 process it to do the validation as if in
8503 thumb mode, just in case the code gets
8504 assembled for thumb using the unified syntax. */
8509 set_it_insn_type (IT_INSN
);
8510 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
8511 now_it
.cc
= inst
.operands
[0].imm
;
8515 /* If there is only one register in the register list,
8516 then return its register number. Otherwise return -1. */
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */

static int
only_one_reg_in_list (int range)
{
  int i;

  /* An empty (or negative) mask names no register.  Bail out before
     computing ffs (range) - 1, which would be -1 for RANGE == 0 and
     make the `1 << i' below a shift by a negative amount (undefined
     behavior).  */
  if (range <= 0)
    return -1;

  i = ffs (range) - 1;
  /* Valid only if the sole set bit is within r0-r15.  */
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
8525 encode_ldmstm(int from_push_pop_mnem
)
8527 int base_reg
= inst
.operands
[0].reg
;
8528 int range
= inst
.operands
[1].imm
;
8531 inst
.instruction
|= base_reg
<< 16;
8532 inst
.instruction
|= range
;
8534 if (inst
.operands
[1].writeback
)
8535 inst
.instruction
|= LDM_TYPE_2_OR_3
;
8537 if (inst
.operands
[0].writeback
)
8539 inst
.instruction
|= WRITE_BACK
;
8540 /* Check for unpredictable uses of writeback. */
8541 if (inst
.instruction
& LOAD_BIT
)
8543 /* Not allowed in LDM type 2. */
8544 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
8545 && ((range
& (1 << REG_PC
)) == 0))
8546 as_warn (_("writeback of base register is UNPREDICTABLE"));
8547 /* Only allowed if base reg not in list for other types. */
8548 else if (range
& (1 << base_reg
))
8549 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8553 /* Not allowed for type 2. */
8554 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
8555 as_warn (_("writeback of base register is UNPREDICTABLE"));
8556 /* Only allowed if base reg not in list, or first in list. */
8557 else if ((range
& (1 << base_reg
))
8558 && (range
& ((1 << base_reg
) - 1)))
8559 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8563 /* If PUSH/POP has only one register, then use the A2 encoding. */
8564 one_reg
= only_one_reg_in_list (range
);
8565 if (from_push_pop_mnem
&& one_reg
>= 0)
8567 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
8569 inst
.instruction
&= A_COND_MASK
;
8570 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
8571 inst
.instruction
|= one_reg
<< 12;
8578 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
8581 /* ARMv5TE load-consecutive (argument parse)
8590 constraint (inst
.operands
[0].reg
% 2 != 0,
8591 _("first transfer register must be even"));
8592 constraint (inst
.operands
[1].present
8593 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8594 _("can only transfer two consecutive registers"));
8595 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8596 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
8598 if (!inst
.operands
[1].present
)
8599 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8601 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8602 register and the first register written; we have to diagnose
8603 overlap between the base and the second register written here. */
8605 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
8606 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
8607 as_warn (_("base register written back, and overlaps "
8608 "second transfer register"));
8610 if (!(inst
.instruction
& V4_STR_BIT
))
8612 /* For an index-register load, the index register must not overlap the
8613 destination (even if not write-back). */
8614 if (inst
.operands
[2].immisreg
8615 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
8616 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
8617 as_warn (_("index register overlaps transfer register"));
8619 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8620 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
8626 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8627 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8628 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8629 || inst
.operands
[1].negative
8630 /* This can arise if the programmer has written
8632 or if they have mistakenly used a register name as the last
8635 It is very difficult to distinguish between these two cases
8636 because "rX" might actually be a label. ie the register
8637 name has been occluded by a symbol of the same name. So we
8638 just generate a general 'bad addressing mode' type error
8639 message and leave it up to the programmer to discover the
8640 true cause and fix their mistake. */
8641 || (inst
.operands
[1].reg
== REG_PC
),
8644 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8645 || inst
.reloc
.exp
.X_add_number
!= 0,
8646 _("offset must be zero in ARM encoding"));
8648 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
8650 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8651 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8652 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8658 constraint (inst
.operands
[0].reg
% 2 != 0,
8659 _("even register required"));
8660 constraint (inst
.operands
[1].present
8661 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8662 _("can only load two consecutive registers"));
8663 /* If op 1 were present and equal to PC, this function wouldn't
8664 have been called in the first place. */
8665 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8667 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8668 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8671 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8672 which is not a multiple of four is UNPREDICTABLE. */
8674 check_ldr_r15_aligned (void)
8676 constraint (!(inst
.operands
[1].immisreg
)
8677 && (inst
.operands
[0].reg
== REG_PC
8678 && inst
.operands
[1].reg
== REG_PC
8679 && (inst
.reloc
.exp
.X_add_number
& 0x3)),
8680 _("ldr to register 15 must be 4-byte alligned"));
8686 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8687 if (!inst
.operands
[1].isreg
)
8688 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
8690 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
8691 check_ldr_r15_aligned ();
8697 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8699 if (inst
.operands
[1].preind
)
8701 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8702 || inst
.reloc
.exp
.X_add_number
!= 0,
8703 _("this instruction requires a post-indexed address"));
8705 inst
.operands
[1].preind
= 0;
8706 inst
.operands
[1].postind
= 1;
8707 inst
.operands
[1].writeback
= 1;
8709 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8710 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
8713 /* Halfword and signed-byte load/store operations. */
8718 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
8719 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8720 if (!inst
.operands
[1].isreg
)
8721 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
8723 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
8729 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8731 if (inst
.operands
[1].preind
)
8733 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8734 || inst
.reloc
.exp
.X_add_number
!= 0,
8735 _("this instruction requires a post-indexed address"));
8737 inst
.operands
[1].preind
= 0;
8738 inst
.operands
[1].postind
= 1;
8739 inst
.operands
[1].writeback
= 1;
8741 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8742 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
8745 /* Co-processor register load/store.
8746 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8750 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8751 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8752 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
8758 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8759 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
8760 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
8761 && !(inst
.instruction
& 0x00400000))
8762 as_tsktsk (_("Rd and Rm should be different in mla"));
8764 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8765 inst
.instruction
|= inst
.operands
[1].reg
;
8766 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8767 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8773 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8774 encode_arm_shifter_operand (1);
8777 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8784 top
= (inst
.instruction
& 0x00400000) != 0;
8785 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
8786 _(":lower16: not allowed this instruction"));
8787 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
8788 _(":upper16: not allowed instruction"));
8789 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8790 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
8792 imm
= inst
.reloc
.exp
.X_add_number
;
8793 /* The value is in two pieces: 0:11, 16:19. */
8794 inst
.instruction
|= (imm
& 0x00000fff);
8795 inst
.instruction
|= (imm
& 0x0000f000) << 4;
8799 static void do_vfp_nsyn_opcode (const char *);
8802 do_vfp_nsyn_mrs (void)
8804 if (inst
.operands
[0].isvec
)
8806 if (inst
.operands
[1].reg
!= 1)
8807 first_error (_("operand 1 must be FPSCR"));
8808 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
8809 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
8810 do_vfp_nsyn_opcode ("fmstat");
8812 else if (inst
.operands
[1].isvec
)
8813 do_vfp_nsyn_opcode ("fmrx");
8821 do_vfp_nsyn_msr (void)
8823 if (inst
.operands
[0].isvec
)
8824 do_vfp_nsyn_opcode ("fmxr");
8834 unsigned Rt
= inst
.operands
[0].reg
;
8836 if (thumb_mode
&& Rt
== REG_SP
)
8838 inst
.error
= BAD_SP
;
8842 /* APSR_ sets isvec. All other refs to PC are illegal. */
8843 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
8845 inst
.error
= BAD_PC
;
8849 /* If we get through parsing the register name, we just insert the number
8850 generated into the instruction without further validation. */
8851 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
8852 inst
.instruction
|= (Rt
<< 12);
8858 unsigned Rt
= inst
.operands
[1].reg
;
8861 reject_bad_reg (Rt
);
8862 else if (Rt
== REG_PC
)
8864 inst
.error
= BAD_PC
;
8868 /* If we get through parsing the register name, we just insert the number
8869 generated into the instruction without further validation. */
8870 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
8871 inst
.instruction
|= (Rt
<< 12);
8879 if (do_vfp_nsyn_mrs () == SUCCESS
)
8882 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
8883 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8885 if (inst
.operands
[1].isreg
)
8887 br
= inst
.operands
[1].reg
;
8888 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf000))
8889 as_bad (_("bad register for mrs"));
8893 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8894 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
8896 _("'APSR', 'CPSR' or 'SPSR' expected"));
8897 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
8900 inst
.instruction
|= br
;
8903 /* Two possible forms:
8904 "{C|S}PSR_<field>, Rm",
8905 "{C|S}PSR_f, #expression". */
8910 if (do_vfp_nsyn_msr () == SUCCESS
)
8913 inst
.instruction
|= inst
.operands
[0].imm
;
8914 if (inst
.operands
[1].isreg
)
8915 inst
.instruction
|= inst
.operands
[1].reg
;
8918 inst
.instruction
|= INST_IMMEDIATE
;
8919 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8920 inst
.reloc
.pc_rel
= 0;
8927 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
8929 if (!inst
.operands
[2].present
)
8930 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
8931 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8932 inst
.instruction
|= inst
.operands
[1].reg
;
8933 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8935 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
8936 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
8937 as_tsktsk (_("Rd and Rm should be different in mul"));
8940 /* Long Multiply Parser
8941 UMULL RdLo, RdHi, Rm, Rs
8942 SMULL RdLo, RdHi, Rm, Rs
8943 UMLAL RdLo, RdHi, Rm, Rs
8944 SMLAL RdLo, RdHi, Rm, Rs. */
8949 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8950 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8951 inst
.instruction
|= inst
.operands
[2].reg
;
8952 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
8954 /* rdhi and rdlo must be different. */
8955 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
8956 as_tsktsk (_("rdhi and rdlo must be different"));
8958 /* rdhi, rdlo and rm must all be different before armv6. */
8959 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
8960 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
8961 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
8962 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
8968 if (inst
.operands
[0].present
8969 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
8971 /* Architectural NOP hints are CPSR sets with no bits selected. */
8972 inst
.instruction
&= 0xf0000000;
8973 inst
.instruction
|= 0x0320f000;
8974 if (inst
.operands
[0].present
)
8975 inst
.instruction
|= inst
.operands
[0].imm
;
8979 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8980 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8981 Condition defaults to COND_ALWAYS.
8982 Error if Rd, Rn or Rm are R15. */
8987 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8988 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8989 inst
.instruction
|= inst
.operands
[2].reg
;
8990 if (inst
.operands
[3].present
)
8991 encode_arm_shift (3);
8994 /* ARM V6 PKHTB (Argument Parse). */
8999 if (!inst
.operands
[3].present
)
9001 /* If the shift specifier is omitted, turn the instruction
9002 into pkhbt rd, rm, rn. */
9003 inst
.instruction
&= 0xfff00010;
9004 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9005 inst
.instruction
|= inst
.operands
[1].reg
;
9006 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9010 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9011 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9012 inst
.instruction
|= inst
.operands
[2].reg
;
9013 encode_arm_shift (3);
9017 /* ARMv5TE: Preload-Cache
9018 MP Extensions: Preload for write
9022 Syntactically, like LDR with B=1, W=0, L=1. */
9027 constraint (!inst
.operands
[0].isreg
,
9028 _("'[' expected after PLD mnemonic"));
9029 constraint (inst
.operands
[0].postind
,
9030 _("post-indexed expression used in preload instruction"));
9031 constraint (inst
.operands
[0].writeback
,
9032 _("writeback used in preload instruction"));
9033 constraint (!inst
.operands
[0].preind
,
9034 _("unindexed addressing used in preload instruction"));
9035 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9038 /* ARMv7: PLI <addr_mode> */
9042 constraint (!inst
.operands
[0].isreg
,
9043 _("'[' expected after PLI mnemonic"));
9044 constraint (inst
.operands
[0].postind
,
9045 _("post-indexed expression used in preload instruction"));
9046 constraint (inst
.operands
[0].writeback
,
9047 _("writeback used in preload instruction"));
9048 constraint (!inst
.operands
[0].preind
,
9049 _("unindexed addressing used in preload instruction"));
9050 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9051 inst
.instruction
&= ~PRE_INDEX
;
9057 constraint (inst
.operands
[0].writeback
,
9058 _("push/pop do not support {reglist}^"));
9059 inst
.operands
[1] = inst
.operands
[0];
9060 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9061 inst
.operands
[0].isreg
= 1;
9062 inst
.operands
[0].writeback
= 1;
9063 inst
.operands
[0].reg
= REG_SP
;
9064 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9067 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9068 word at the specified address and the following word
9070 Unconditionally executed.
9071 Error if Rn is R15. */
9076 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9077 if (inst
.operands
[0].writeback
)
9078 inst
.instruction
|= WRITE_BACK
;
9081 /* ARM V6 ssat (argument parse). */
9086 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9087 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9088 inst
.instruction
|= inst
.operands
[2].reg
;
9090 if (inst
.operands
[3].present
)
9091 encode_arm_shift (3);
9094 /* ARM V6 usat (argument parse). */
9099 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9100 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9101 inst
.instruction
|= inst
.operands
[2].reg
;
9103 if (inst
.operands
[3].present
)
9104 encode_arm_shift (3);
9107 /* ARM V6 ssat16 (argument parse). */
9112 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9113 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9114 inst
.instruction
|= inst
.operands
[2].reg
;
9120 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9121 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9122 inst
.instruction
|= inst
.operands
[2].reg
;
9125 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9126 preserving the other bits.
9128 setend <endian_specifier>, where <endian_specifier> is either
9134 if (warn_on_deprecated
9135 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9136 as_tsktsk (_("setend use is deprecated for ARMv8"));
9138 if (inst
.operands
[0].imm
)
9139 inst
.instruction
|= 0x200;
9145 unsigned int Rm
= (inst
.operands
[1].present
9146 ? inst
.operands
[1].reg
9147 : inst
.operands
[0].reg
);
9149 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9150 inst
.instruction
|= Rm
;
9151 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9153 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9154 inst
.instruction
|= SHIFT_BY_REG
;
9155 /* PR 12854: Error on extraneous shifts. */
9156 constraint (inst
.operands
[2].shifted
,
9157 _("extraneous shift as part of operand to shift insn"));
9160 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
9166 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
9167 inst
.reloc
.pc_rel
= 0;
9173 inst
.reloc
.type
= BFD_RELOC_ARM_HVC
;
9174 inst
.reloc
.pc_rel
= 0;
9180 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9181 inst
.reloc
.pc_rel
= 0;
9184 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9185 SMLAxy{cond} Rd,Rm,Rs,Rn
9186 SMLAWy{cond} Rd,Rm,Rs,Rn
9187 Error if any register is R15. */
9192 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9193 inst
.instruction
|= inst
.operands
[1].reg
;
9194 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9195 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9198 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9199 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9200 Error if any register is R15.
9201 Warning if Rdlo == Rdhi. */
9206 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9207 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9208 inst
.instruction
|= inst
.operands
[2].reg
;
9209 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9211 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9212 as_tsktsk (_("rdhi and rdlo must be different"));
9215 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9216 SMULxy{cond} Rd,Rm,Rs
9217 Error if any register is R15. */
9222 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9223 inst
.instruction
|= inst
.operands
[1].reg
;
9224 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9227 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9228 the same for both ARM and Thumb-2. */
9235 if (inst
.operands
[0].present
)
9237 reg
= inst
.operands
[0].reg
;
9238 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9243 inst
.instruction
|= reg
<< 16;
9244 inst
.instruction
|= inst
.operands
[1].imm
;
9245 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9246 inst
.instruction
|= WRITE_BACK
;
9249 /* ARM V6 strex (argument parse). */
9254 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9255 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9256 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9257 || inst
.operands
[2].negative
9258 /* See comment in do_ldrex(). */
9259 || (inst
.operands
[2].reg
== REG_PC
),
9262 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9263 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9265 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9266 || inst
.reloc
.exp
.X_add_number
!= 0,
9267 _("offset must be zero in ARM encoding"));
9269 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9270 inst
.instruction
|= inst
.operands
[1].reg
;
9271 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9272 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9278 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9279 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9280 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9281 || inst
.operands
[2].negative
,
9284 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9285 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9293 constraint (inst
.operands
[1].reg
% 2 != 0,
9294 _("even register required"));
9295 constraint (inst
.operands
[2].present
9296 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9297 _("can only store two consecutive registers"));
9298 /* If op 2 were present and equal to PC, this function wouldn't
9299 have been called in the first place. */
9300 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9302 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9303 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9304 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9307 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9308 inst
.instruction
|= inst
.operands
[1].reg
;
9309 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9316 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9317 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9325 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9326 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9331 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9332 extends it to 32-bits, and adds the result to a value in another
9333 register. You can specify a rotation by 0, 8, 16, or 24 bits
9334 before extracting the 16-bit value.
9335 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9336 Condition defaults to COND_ALWAYS.
9337 Error if any register uses R15. */
9342 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9343 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9344 inst
.instruction
|= inst
.operands
[2].reg
;
9345 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9350 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9351 Condition defaults to COND_ALWAYS.
9352 Error if any register uses R15. */
9357 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9358 inst
.instruction
|= inst
.operands
[1].reg
;
9359 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9362 /* VFP instructions. In a logical order: SP variant first, monad
9363 before dyad, arithmetic then move then load/store. */
9366 do_vfp_sp_monadic (void)
9368 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9369 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9373 do_vfp_sp_dyadic (void)
9375 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9376 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9377 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9381 do_vfp_sp_compare_z (void)
9383 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9387 do_vfp_dp_sp_cvt (void)
9389 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9390 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9394 do_vfp_sp_dp_cvt (void)
9396 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9397 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9401 do_vfp_reg_from_sp (void)
9403 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9404 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9408 do_vfp_reg2_from_sp2 (void)
9410 constraint (inst
.operands
[2].imm
!= 2,
9411 _("only two consecutive VFP SP registers allowed here"));
9412 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9413 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9414 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9418 do_vfp_sp_from_reg (void)
9420 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9421 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9425 do_vfp_sp2_from_reg2 (void)
9427 constraint (inst
.operands
[0].imm
!= 2,
9428 _("only two consecutive VFP SP registers allowed here"));
9429 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9430 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9431 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9435 do_vfp_sp_ldst (void)
9437 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9438 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9442 do_vfp_dp_ldst (void)
9444 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9445 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9450 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
9452 if (inst
.operands
[0].writeback
)
9453 inst
.instruction
|= WRITE_BACK
;
9455 constraint (ldstm_type
!= VFP_LDSTMIA
,
9456 _("this addressing mode requires base-register writeback"));
9457 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9458 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
9459 inst
.instruction
|= inst
.operands
[1].imm
;
9463 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
9467 if (inst
.operands
[0].writeback
)
9468 inst
.instruction
|= WRITE_BACK
;
9470 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
9471 _("this addressing mode requires base-register writeback"));
9473 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9474 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9476 count
= inst
.operands
[1].imm
<< 1;
9477 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
9480 inst
.instruction
|= count
;
9484 do_vfp_sp_ldstmia (void)
9486 vfp_sp_ldstm (VFP_LDSTMIA
);
9490 do_vfp_sp_ldstmdb (void)
9492 vfp_sp_ldstm (VFP_LDSTMDB
);
9496 do_vfp_dp_ldstmia (void)
9498 vfp_dp_ldstm (VFP_LDSTMIA
);
9502 do_vfp_dp_ldstmdb (void)
9504 vfp_dp_ldstm (VFP_LDSTMDB
);
9508 do_vfp_xp_ldstmia (void)
9510 vfp_dp_ldstm (VFP_LDSTMIAX
);
9514 do_vfp_xp_ldstmdb (void)
9516 vfp_dp_ldstm (VFP_LDSTMDBX
);
9520 do_vfp_dp_rd_rm (void)
9522 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9523 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9527 do_vfp_dp_rn_rd (void)
9529 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
9530 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9534 do_vfp_dp_rd_rn (void)
9536 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9537 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9541 do_vfp_dp_rd_rn_rm (void)
9543 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9544 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9545 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
9551 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9555 do_vfp_dp_rm_rd_rn (void)
9557 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
9558 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9559 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
9562 /* VFPv3 instructions. */
9564 do_vfp_sp_const (void)
9566 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9567 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9568 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9572 do_vfp_dp_const (void)
9574 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9575 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9576 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9580 vfp_conv (int srcsize
)
9582 int immbits
= srcsize
- inst
.operands
[1].imm
;
9584 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
9586 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9587 i.e. immbits must be in range 0 - 16. */
9588 inst
.error
= _("immediate value out of range, expected range [0, 16]");
9591 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
9593 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9594 i.e. immbits must be in range 0 - 31. */
9595 inst
.error
= _("immediate value out of range, expected range [1, 32]");
9599 inst
.instruction
|= (immbits
& 1) << 5;
9600 inst
.instruction
|= (immbits
>> 1);
9604 do_vfp_sp_conv_16 (void)
9606 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9611 do_vfp_dp_conv_16 (void)
9613 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9618 do_vfp_sp_conv_32 (void)
9620 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9625 do_vfp_dp_conv_32 (void)
9627 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9631 /* FPA instructions. Also in a logical order. */
9636 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9637 inst
.instruction
|= inst
.operands
[1].reg
;
9641 do_fpa_ldmstm (void)
9643 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9644 switch (inst
.operands
[1].imm
)
9646 case 1: inst
.instruction
|= CP_T_X
; break;
9647 case 2: inst
.instruction
|= CP_T_Y
; break;
9648 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
9653 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
9655 /* The instruction specified "ea" or "fd", so we can only accept
9656 [Rn]{!}. The instruction does not really support stacking or
9657 unstacking, so we have to emulate these by setting appropriate
9658 bits and offsets. */
9659 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9660 || inst
.reloc
.exp
.X_add_number
!= 0,
9661 _("this instruction does not support indexing"));
9663 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
9664 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
9666 if (!(inst
.instruction
& INDEX_UP
))
9667 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
9669 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
9671 inst
.operands
[2].preind
= 0;
9672 inst
.operands
[2].postind
= 1;
9676 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9679 /* iWMMXt instructions: strictly in alphabetical order. */
9682 do_iwmmxt_tandorc (void)
9684 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
9688 do_iwmmxt_textrc (void)
9690 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9691 inst
.instruction
|= inst
.operands
[1].imm
;
9695 do_iwmmxt_textrm (void)
9697 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9698 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9699 inst
.instruction
|= inst
.operands
[2].imm
;
9703 do_iwmmxt_tinsr (void)
9705 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9706 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9707 inst
.instruction
|= inst
.operands
[2].imm
;
9711 do_iwmmxt_tmia (void)
9713 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
9714 inst
.instruction
|= inst
.operands
[1].reg
;
9715 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9719 do_iwmmxt_waligni (void)
9721 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9722 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9723 inst
.instruction
|= inst
.operands
[2].reg
;
9724 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
9728 do_iwmmxt_wmerge (void)
9730 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9731 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9732 inst
.instruction
|= inst
.operands
[2].reg
;
9733 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
9737 do_iwmmxt_wmov (void)
9739 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9740 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9741 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9742 inst
.instruction
|= inst
.operands
[1].reg
;
9746 do_iwmmxt_wldstbh (void)
9749 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9751 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
9753 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
9754 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
9758 do_iwmmxt_wldstw (void)
9760 /* RIWR_RIWC clears .isreg for a control register. */
9761 if (!inst
.operands
[0].isreg
)
9763 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
9764 inst
.instruction
|= 0xf0000000;
9767 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9768 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
9772 do_iwmmxt_wldstd (void)
9774 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9775 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
9776 && inst
.operands
[1].immisreg
)
9778 inst
.instruction
&= ~0x1a000ff;
9779 inst
.instruction
|= (0xf << 28);
9780 if (inst
.operands
[1].preind
)
9781 inst
.instruction
|= PRE_INDEX
;
9782 if (!inst
.operands
[1].negative
)
9783 inst
.instruction
|= INDEX_UP
;
9784 if (inst
.operands
[1].writeback
)
9785 inst
.instruction
|= WRITE_BACK
;
9786 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9787 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
9788 inst
.instruction
|= inst
.operands
[1].imm
;
9791 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
9795 do_iwmmxt_wshufh (void)
9797 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9798 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9799 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
9800 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
9804 do_iwmmxt_wzero (void)
9806 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9807 inst
.instruction
|= inst
.operands
[0].reg
;
9808 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9809 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9813 do_iwmmxt_wrwrwr_or_imm5 (void)
9815 if (inst
.operands
[2].isreg
)
9818 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
9819 _("immediate operand requires iWMMXt2"));
9821 if (inst
.operands
[2].imm
== 0)
9823 switch ((inst
.instruction
>> 20) & 0xf)
9829 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
9830 inst
.operands
[2].imm
= 16;
9831 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
9837 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
9838 inst
.operands
[2].imm
= 32;
9839 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
9846 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
9848 wrn
= (inst
.instruction
>> 16) & 0xf;
9849 inst
.instruction
&= 0xff0fff0f;
9850 inst
.instruction
|= wrn
;
9851 /* Bail out here; the instruction is now assembled. */
9856 /* Map 32 -> 0, etc. */
9857 inst
.operands
[2].imm
&= 0x1f;
9858 inst
.instruction
|= (0xf << 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
9862 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
9863 operations first, then control, shift, and load/store. */
9865 /* Insns like "foo X,Y,Z". */
9868 do_mav_triple (void)
9870 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9871 inst
.instruction
|= inst
.operands
[1].reg
;
9872 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9875 /* Insns like "foo W,X,Y,Z".
9876 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
9881 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
9882 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9883 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9884 inst
.instruction
|= inst
.operands
[3].reg
;
9887 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
9891 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9894 /* Maverick shift immediate instructions.
9895 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9896 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
9901 int imm
= inst
.operands
[2].imm
;
9903 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9904 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9906 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9907 Bits 5-7 of the insn should have bits 4-6 of the immediate.
9908 Bit 4 should be 0. */
9909 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
9911 inst
.instruction
|= imm
;
9914 /* XScale instructions. Also sorted arithmetic before move. */
9916 /* Xscale multiply-accumulate (argument parse)
9919 MIAxycc acc0,Rm,Rs. */
9924 inst
.instruction
|= inst
.operands
[1].reg
;
9925 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9928 /* Xscale move-accumulator-register (argument parse)
9930 MARcc acc0,RdLo,RdHi. */
9935 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9936 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9939 /* Xscale move-register-accumulator (argument parse)
9941 MRAcc RdLo,RdHi,acc0. */
9946 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
9947 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9948 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9951 /* Encoding functions relevant only to Thumb. */
9953 /* inst.operands[i] is a shifted-register operand; encode
9954 it into inst.instruction in the format used by Thumb32. */
9957 encode_thumb32_shifted_operand (int i
)
9959 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
9960 unsigned int shift
= inst
.operands
[i
].shift_kind
;
9962 constraint (inst
.operands
[i
].immisreg
,
9963 _("shift by register not allowed in thumb mode"));
9964 inst
.instruction
|= inst
.operands
[i
].reg
;
9965 if (shift
== SHIFT_RRX
)
9966 inst
.instruction
|= SHIFT_ROR
<< 4;
9969 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9970 _("expression too complex"));
9972 constraint (value
> 32
9973 || (value
== 32 && (shift
== SHIFT_LSL
9974 || shift
== SHIFT_ROR
)),
9975 _("shift expression is too large"));
9979 else if (value
== 32)
9982 inst
.instruction
|= shift
<< 4;
9983 inst
.instruction
|= (value
& 0x1c) << 10;
9984 inst
.instruction
|= (value
& 0x03) << 6;
9989 /* inst.operands[i] was set up by parse_address. Encode it into a
9990 Thumb32 format load or store instruction. Reject forms that cannot
9991 be used with such instructions. If is_t is true, reject forms that
9992 cannot be used with a T instruction; if is_d is true, reject forms
9993 that cannot be used with a D instruction. If it is a store insn,
9997 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
9999 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10001 constraint (!inst
.operands
[i
].isreg
,
10002 _("Instruction does not support =N addresses"));
10004 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10005 if (inst
.operands
[i
].immisreg
)
10007 constraint (is_pc
, BAD_PC_ADDRESSING
);
10008 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10009 constraint (inst
.operands
[i
].negative
,
10010 _("Thumb does not support negative register indexing"));
10011 constraint (inst
.operands
[i
].postind
,
10012 _("Thumb does not support register post-indexing"));
10013 constraint (inst
.operands
[i
].writeback
,
10014 _("Thumb does not support register indexing with writeback"));
10015 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10016 _("Thumb supports only LSL in shifted register indexing"));
10018 inst
.instruction
|= inst
.operands
[i
].imm
;
10019 if (inst
.operands
[i
].shifted
)
10021 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10022 _("expression too complex"));
10023 constraint (inst
.reloc
.exp
.X_add_number
< 0
10024 || inst
.reloc
.exp
.X_add_number
> 3,
10025 _("shift out of range"));
10026 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10028 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10030 else if (inst
.operands
[i
].preind
)
10032 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10033 constraint (is_t
&& inst
.operands
[i
].writeback
,
10034 _("cannot use writeback with this instruction"));
10035 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10036 BAD_PC_ADDRESSING
);
10040 inst
.instruction
|= 0x01000000;
10041 if (inst
.operands
[i
].writeback
)
10042 inst
.instruction
|= 0x00200000;
10046 inst
.instruction
|= 0x00000c00;
10047 if (inst
.operands
[i
].writeback
)
10048 inst
.instruction
|= 0x00000100;
10050 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10052 else if (inst
.operands
[i
].postind
)
10054 gas_assert (inst
.operands
[i
].writeback
);
10055 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10056 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10059 inst
.instruction
|= 0x00200000;
10061 inst
.instruction
|= 0x00000900;
10062 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10064 else /* unindexed - only for coprocessor */
10065 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)	     (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
10174 /* Thumb instruction encoders, in alphabetical order. */
10176 /* ADDW or SUBW. */
10179 do_t_add_sub_w (void)
10183 Rd
= inst
.operands
[0].reg
;
10184 Rn
= inst
.operands
[1].reg
;
10186 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10187 is the SP-{plus,minus}-immediate form of the instruction. */
10189 constraint (Rd
== REG_PC
, BAD_PC
);
10191 reject_bad_reg (Rd
);
10193 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10194 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10197 /* Parse an add or subtract instruction. We get here with inst.instruction
10198 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10201 do_t_add_sub (void)
10205 Rd
= inst
.operands
[0].reg
;
10206 Rs
= (inst
.operands
[1].present
10207 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10208 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10211 set_it_insn_type_last ();
10213 if (unified_syntax
)
10216 bfd_boolean narrow
;
10219 flags
= (inst
.instruction
== T_MNEM_adds
10220 || inst
.instruction
== T_MNEM_subs
);
10222 narrow
= !in_it_block ();
10224 narrow
= in_it_block ();
10225 if (!inst
.operands
[2].isreg
)
10229 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10231 add
= (inst
.instruction
== T_MNEM_add
10232 || inst
.instruction
== T_MNEM_adds
);
10234 if (inst
.size_req
!= 4)
10236 /* Attempt to use a narrow opcode, with relaxation if
10238 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10239 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10240 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10241 opcode
= T_MNEM_add_sp
;
10242 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10243 opcode
= T_MNEM_add_pc
;
10244 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10247 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10249 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10253 inst
.instruction
= THUMB_OP16(opcode
);
10254 inst
.instruction
|= (Rd
<< 4) | Rs
;
10255 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10256 if (inst
.size_req
!= 2)
10257 inst
.relax
= opcode
;
10260 constraint (inst
.size_req
== 2, BAD_HIREG
);
10262 if (inst
.size_req
== 4
10263 || (inst
.size_req
!= 2 && !opcode
))
10267 constraint (add
, BAD_PC
);
10268 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10269 _("only SUBS PC, LR, #const allowed"));
10270 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10271 _("expression too complex"));
10272 constraint (inst
.reloc
.exp
.X_add_number
< 0
10273 || inst
.reloc
.exp
.X_add_number
> 0xff,
10274 _("immediate value out of range"));
10275 inst
.instruction
= T2_SUBS_PC_LR
10276 | inst
.reloc
.exp
.X_add_number
;
10277 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10280 else if (Rs
== REG_PC
)
10282 /* Always use addw/subw. */
10283 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10284 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10288 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10289 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10292 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10294 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10296 inst
.instruction
|= Rd
<< 8;
10297 inst
.instruction
|= Rs
<< 16;
10302 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10303 unsigned int shift
= inst
.operands
[2].shift_kind
;
10305 Rn
= inst
.operands
[2].reg
;
10306 /* See if we can do this with a 16-bit instruction. */
10307 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10309 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10314 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10315 || inst
.instruction
== T_MNEM_add
)
10317 : T_OPCODE_SUB_R3
);
10318 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10322 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10324 /* Thumb-1 cores (except v6-M) require at least one high
10325 register in a narrow non flag setting add. */
10326 if (Rd
> 7 || Rn
> 7
10327 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10328 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10335 inst
.instruction
= T_OPCODE_ADD_HI
;
10336 inst
.instruction
|= (Rd
& 8) << 4;
10337 inst
.instruction
|= (Rd
& 7);
10338 inst
.instruction
|= Rn
<< 3;
10344 constraint (Rd
== REG_PC
, BAD_PC
);
10345 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10346 constraint (Rs
== REG_PC
, BAD_PC
);
10347 reject_bad_reg (Rn
);
10349 /* If we get here, it can't be done in 16 bits. */
10350 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10351 _("shift must be constant"));
10352 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10353 inst
.instruction
|= Rd
<< 8;
10354 inst
.instruction
|= Rs
<< 16;
10355 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10356 _("shift value over 3 not allowed in thumb mode"));
10357 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10358 _("only LSL shift allowed in thumb mode"));
10359 encode_thumb32_shifted_operand (2);
10364 constraint (inst
.instruction
== T_MNEM_adds
10365 || inst
.instruction
== T_MNEM_subs
,
10368 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10370 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10371 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10374 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10375 ? 0x0000 : 0x8000);
10376 inst
.instruction
|= (Rd
<< 4) | Rs
;
10377 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10381 Rn
= inst
.operands
[2].reg
;
10382 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10384 /* We now have Rd, Rs, and Rn set to registers. */
10385 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10387 /* Can't do this for SUB. */
10388 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10389 inst
.instruction
= T_OPCODE_ADD_HI
;
10390 inst
.instruction
|= (Rd
& 8) << 4;
10391 inst
.instruction
|= (Rd
& 7);
10393 inst
.instruction
|= Rn
<< 3;
10395 inst
.instruction
|= Rs
<< 3;
10397 constraint (1, _("dest must overlap one source register"));
10401 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10402 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
10403 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10413 Rd
= inst
.operands
[0].reg
;
10414 reject_bad_reg (Rd
);
10416 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
10418 /* Defer to section relaxation. */
10419 inst
.relax
= inst
.instruction
;
10420 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10421 inst
.instruction
|= Rd
<< 4;
10423 else if (unified_syntax
&& inst
.size_req
!= 2)
10425 /* Generate a 32-bit opcode. */
10426 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10427 inst
.instruction
|= Rd
<< 8;
10428 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
10429 inst
.reloc
.pc_rel
= 1;
10433 /* Generate a 16-bit opcode. */
10434 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10435 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10436 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
10437 inst
.reloc
.pc_rel
= 1;
10439 inst
.instruction
|= Rd
<< 4;
10443 /* Arithmetic instructions for which there is just one 16-bit
10444 instruction encoding, and it allows only two low registers.
10445 For maximal compatibility with ARM syntax, we allow three register
10446 operands even when Thumb-32 instructions are not available, as long
10447 as the first two are identical. For instance, both "sbc r0,r1" and
10448 "sbc r0,r0,r1" are allowed. */
10454 Rd
= inst
.operands
[0].reg
;
10455 Rs
= (inst
.operands
[1].present
10456 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10457 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10458 Rn
= inst
.operands
[2].reg
;
10460 reject_bad_reg (Rd
);
10461 reject_bad_reg (Rs
);
10462 if (inst
.operands
[2].isreg
)
10463 reject_bad_reg (Rn
);
10465 if (unified_syntax
)
10467 if (!inst
.operands
[2].isreg
)
10469 /* For an immediate, we always generate a 32-bit opcode;
10470 section relaxation will shrink it later if possible. */
10471 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10472 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10473 inst
.instruction
|= Rd
<< 8;
10474 inst
.instruction
|= Rs
<< 16;
10475 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10479 bfd_boolean narrow
;
10481 /* See if we can do this with a 16-bit instruction. */
10482 if (THUMB_SETS_FLAGS (inst
.instruction
))
10483 narrow
= !in_it_block ();
10485 narrow
= in_it_block ();
10487 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10489 if (inst
.operands
[2].shifted
)
10491 if (inst
.size_req
== 4)
10497 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10498 inst
.instruction
|= Rd
;
10499 inst
.instruction
|= Rn
<< 3;
10503 /* If we get here, it can't be done in 16 bits. */
10504 constraint (inst
.operands
[2].shifted
10505 && inst
.operands
[2].immisreg
,
10506 _("shift must be constant"));
10507 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10508 inst
.instruction
|= Rd
<< 8;
10509 inst
.instruction
|= Rs
<< 16;
10510 encode_thumb32_shifted_operand (2);
10515 /* On its face this is a lie - the instruction does set the
10516 flags. However, the only supported mnemonic in this mode
10517 says it doesn't. */
10518 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10520 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10521 _("unshifted register required"));
10522 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10523 constraint (Rd
!= Rs
,
10524 _("dest and source1 must be the same register"));
10526 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10527 inst
.instruction
|= Rd
;
10528 inst
.instruction
|= Rn
<< 3;
10532 /* Similarly, but for instructions where the arithmetic operation is
10533 commutative, so we can allow either of them to be different from
10534 the destination operand in a 16-bit instruction. For instance, all
10535 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10542 Rd
= inst
.operands
[0].reg
;
10543 Rs
= (inst
.operands
[1].present
10544 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10545 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10546 Rn
= inst
.operands
[2].reg
;
10548 reject_bad_reg (Rd
);
10549 reject_bad_reg (Rs
);
10550 if (inst
.operands
[2].isreg
)
10551 reject_bad_reg (Rn
);
10553 if (unified_syntax
)
10555 if (!inst
.operands
[2].isreg
)
10557 /* For an immediate, we always generate a 32-bit opcode;
10558 section relaxation will shrink it later if possible. */
10559 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10560 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10561 inst
.instruction
|= Rd
<< 8;
10562 inst
.instruction
|= Rs
<< 16;
10563 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10567 bfd_boolean narrow
;
10569 /* See if we can do this with a 16-bit instruction. */
10570 if (THUMB_SETS_FLAGS (inst
.instruction
))
10571 narrow
= !in_it_block ();
10573 narrow
= in_it_block ();
10575 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10577 if (inst
.operands
[2].shifted
)
10579 if (inst
.size_req
== 4)
10586 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10587 inst
.instruction
|= Rd
;
10588 inst
.instruction
|= Rn
<< 3;
10593 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10594 inst
.instruction
|= Rd
;
10595 inst
.instruction
|= Rs
<< 3;
10600 /* If we get here, it can't be done in 16 bits. */
10601 constraint (inst
.operands
[2].shifted
10602 && inst
.operands
[2].immisreg
,
10603 _("shift must be constant"));
10604 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10605 inst
.instruction
|= Rd
<< 8;
10606 inst
.instruction
|= Rs
<< 16;
10607 encode_thumb32_shifted_operand (2);
10612 /* On its face this is a lie - the instruction does set the
10613 flags. However, the only supported mnemonic in this mode
10614 says it doesn't. */
10615 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10617 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10618 _("unshifted register required"));
10619 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10621 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10622 inst
.instruction
|= Rd
;
10625 inst
.instruction
|= Rn
<< 3;
10627 inst
.instruction
|= Rs
<< 3;
10629 constraint (1, _("dest must overlap one source register"));
10637 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
10638 constraint (msb
> 32, _("bit-field extends past end of register"));
10639 /* The instruction encoding stores the LSB and MSB,
10640 not the LSB and width. */
10641 Rd
= inst
.operands
[0].reg
;
10642 reject_bad_reg (Rd
);
10643 inst
.instruction
|= Rd
<< 8;
10644 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
10645 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
10646 inst
.instruction
|= msb
- 1;
10655 Rd
= inst
.operands
[0].reg
;
10656 reject_bad_reg (Rd
);
10658 /* #0 in second position is alternative syntax for bfc, which is
10659 the same instruction but with REG_PC in the Rm field. */
10660 if (!inst
.operands
[1].isreg
)
10664 Rn
= inst
.operands
[1].reg
;
10665 reject_bad_reg (Rn
);
10668 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
10669 constraint (msb
> 32, _("bit-field extends past end of register"));
10670 /* The instruction encoding stores the LSB and MSB,
10671 not the LSB and width. */
10672 inst
.instruction
|= Rd
<< 8;
10673 inst
.instruction
|= Rn
<< 16;
10674 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10675 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10676 inst
.instruction
|= msb
- 1;
10684 Rd
= inst
.operands
[0].reg
;
10685 Rn
= inst
.operands
[1].reg
;
10687 reject_bad_reg (Rd
);
10688 reject_bad_reg (Rn
);
10690 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
10691 _("bit-field extends past end of register"));
10692 inst
.instruction
|= Rd
<< 8;
10693 inst
.instruction
|= Rn
<< 16;
10694 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10695 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10696 inst
.instruction
|= inst
.operands
[3].imm
- 1;
10699 /* ARM V5 Thumb BLX (argument parse)
10700 BLX <target_addr> which is BLX(1)
10701 BLX <Rm> which is BLX(2)
10702 Unfortunately, there are two different opcodes for this mnemonic.
10703 So, the insns[].value is not used, and the code here zaps values
10704 into inst.instruction.
10706 ??? How to take advantage of the additional two bits of displacement
10707 available in Thumb32 mode? Need new relocation? */
10712 set_it_insn_type_last ();
10714 if (inst
.operands
[0].isreg
)
10716 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
10717 /* We have a register, so this is BLX(2). */
10718 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
10722 /* No register. This must be BLX(1). */
10723 inst
.instruction
= 0xf000e800;
10724 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
10736 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
10738 if (in_it_block ())
10740 /* Conditional branches inside IT blocks are encoded as unconditional
10742 cond
= COND_ALWAYS
;
10747 if (cond
!= COND_ALWAYS
)
10748 opcode
= T_MNEM_bcond
;
10750 opcode
= inst
.instruction
;
10753 && (inst
.size_req
== 4
10754 || (inst
.size_req
!= 2
10755 && (inst
.operands
[0].hasreloc
10756 || inst
.reloc
.exp
.X_op
== O_constant
))))
10758 inst
.instruction
= THUMB_OP32(opcode
);
10759 if (cond
== COND_ALWAYS
)
10760 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
10763 gas_assert (cond
!= 0xF);
10764 inst
.instruction
|= cond
<< 22;
10765 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
10770 inst
.instruction
= THUMB_OP16(opcode
);
10771 if (cond
== COND_ALWAYS
)
10772 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
10775 inst
.instruction
|= cond
<< 8;
10776 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
10778 /* Allow section relaxation. */
10779 if (unified_syntax
&& inst
.size_req
!= 2)
10780 inst
.relax
= opcode
;
10782 inst
.reloc
.type
= reloc
;
10783 inst
.reloc
.pc_rel
= 1;
10786 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10787 between the two is the maximum immediate allowed - which is passed in
10790 do_t_bkpt_hlt1 (int range
)
10792 constraint (inst
.cond
!= COND_ALWAYS
,
10793 _("instruction is always unconditional"));
10794 if (inst
.operands
[0].present
)
10796 constraint (inst
.operands
[0].imm
> range
,
10797 _("immediate value out of range"));
10798 inst
.instruction
|= inst
.operands
[0].imm
;
10801 set_it_insn_type (NEUTRAL_IT_INSN
);
/* HLT: immediate limited to 6 bits.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
/* BKPT: immediate limited to 8 bits.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
10817 do_t_branch23 (void)
10819 set_it_insn_type_last ();
10820 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
10822 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
10823 this file. We used to simply ignore the PLT reloc type here --
10824 the branch encoding is now needed to deal with TLSCALL relocs.
10825 So if we see a PLT reloc now, put it back to how it used to be to
10826 keep the preexisting behaviour. */
10827 if (inst
.reloc
.type
== BFD_RELOC_ARM_PLT32
)
10828 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
10830 #if defined(OBJ_COFF)
10831 /* If the destination of the branch is a defined symbol which does not have
10832 the THUMB_FUNC attribute, then we must be calling a function which has
10833 the (interfacearm) attribute. We look for the Thumb entry point to that
10834 function and change the branch to refer to that function instead. */
10835 if ( inst
.reloc
.exp
.X_op
== O_symbol
10836 && inst
.reloc
.exp
.X_add_symbol
!= NULL
10837 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
10838 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
10839 inst
.reloc
.exp
.X_add_symbol
=
10840 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
10847 set_it_insn_type_last ();
10848 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
10849 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
10850 should cause the alignment to be checked once it is known. This is
10851 because BX PC only works if the instruction is word aligned. */
10859 set_it_insn_type_last ();
10860 Rm
= inst
.operands
[0].reg
;
10861 reject_bad_reg (Rm
);
10862 inst
.instruction
|= Rm
<< 16;
10871 Rd
= inst
.operands
[0].reg
;
10872 Rm
= inst
.operands
[1].reg
;
10874 reject_bad_reg (Rd
);
10875 reject_bad_reg (Rm
);
10877 inst
.instruction
|= Rd
<< 8;
10878 inst
.instruction
|= Rm
<< 16;
10879 inst
.instruction
|= Rm
;
10885 set_it_insn_type (OUTSIDE_IT_INSN
);
10886 inst
.instruction
|= inst
.operands
[0].imm
;
10892 set_it_insn_type (OUTSIDE_IT_INSN
);
10894 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
10895 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
10897 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
10898 inst
.instruction
= 0xf3af8000;
10899 inst
.instruction
|= imod
<< 9;
10900 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
10901 if (inst
.operands
[1].present
)
10902 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
10906 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
10907 && (inst
.operands
[0].imm
& 4),
10908 _("selected processor does not support 'A' form "
10909 "of this instruction"));
10910 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
10911 _("Thumb does not support the 2-argument "
10912 "form of this instruction"));
10913 inst
.instruction
|= inst
.operands
[0].imm
;
10917 /* THUMB CPY instruction (argument parse). */
10922 if (inst
.size_req
== 4)
10924 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
10925 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10926 inst
.instruction
|= inst
.operands
[1].reg
;
10930 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
10931 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
10932 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10939 set_it_insn_type (OUTSIDE_IT_INSN
);
10940 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
10941 inst
.instruction
|= inst
.operands
[0].reg
;
10942 inst
.reloc
.pc_rel
= 1;
10943 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
10949 inst
.instruction
|= inst
.operands
[0].imm
;
10955 unsigned Rd
, Rn
, Rm
;
10957 Rd
= inst
.operands
[0].reg
;
10958 Rn
= (inst
.operands
[1].present
10959 ? inst
.operands
[1].reg
: Rd
);
10960 Rm
= inst
.operands
[2].reg
;
10962 reject_bad_reg (Rd
);
10963 reject_bad_reg (Rn
);
10964 reject_bad_reg (Rm
);
10966 inst
.instruction
|= Rd
<< 8;
10967 inst
.instruction
|= Rn
<< 16;
10968 inst
.instruction
|= Rm
;
10974 if (unified_syntax
&& inst
.size_req
== 4)
10975 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10977 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10983 unsigned int cond
= inst
.operands
[0].imm
;
10985 set_it_insn_type (IT_INSN
);
10986 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
10988 now_it
.warn_deprecated
= FALSE
;
10990 /* If the condition is a negative condition, invert the mask. */
10991 if ((cond
& 0x1) == 0x0)
10993 unsigned int mask
= inst
.instruction
& 0x000f;
10995 if ((mask
& 0x7) == 0)
10997 /* No conversion needed. */
10998 now_it
.block_length
= 1;
11000 else if ((mask
& 0x3) == 0)
11003 now_it
.block_length
= 2;
11005 else if ((mask
& 0x1) == 0)
11008 now_it
.block_length
= 3;
11013 now_it
.block_length
= 4;
11016 inst
.instruction
&= 0xfff0;
11017 inst
.instruction
|= mask
;
11020 inst
.instruction
|= cond
<< 4;
11023 /* Helper function used for both push/pop and ldm/stm. */
11025 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
11029 load
= (inst
.instruction
& (1 << 20)) != 0;
11031 if (mask
& (1 << 13))
11032 inst
.error
= _("SP not allowed in register list");
11034 if ((mask
& (1 << base
)) != 0
11036 inst
.error
= _("having the base register in the register list when "
11037 "using write back is UNPREDICTABLE");
11041 if (mask
& (1 << 15))
11043 if (mask
& (1 << 14))
11044 inst
.error
= _("LR and PC should not both be in register list");
11046 set_it_insn_type_last ();
11051 if (mask
& (1 << 15))
11052 inst
.error
= _("PC not allowed in register list");
11055 if ((mask
& (mask
- 1)) == 0)
11057 /* Single register transfers implemented as str/ldr. */
11060 if (inst
.instruction
& (1 << 23))
11061 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11063 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11067 if (inst
.instruction
& (1 << 23))
11068 inst
.instruction
= 0x00800000; /* ia -> [base] */
11070 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11073 inst
.instruction
|= 0xf8400000;
11075 inst
.instruction
|= 0x00100000;
11077 mask
= ffs (mask
) - 1;
11080 else if (writeback
)
11081 inst
.instruction
|= WRITE_BACK
;
11083 inst
.instruction
|= mask
;
11084 inst
.instruction
|= base
<< 16;
11090 /* This really doesn't seem worth it. */
11091 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
11092 _("expression too complex"));
11093 constraint (inst
.operands
[1].writeback
,
11094 _("Thumb load/store multiple does not support {reglist}^"));
11096 if (unified_syntax
)
11098 bfd_boolean narrow
;
11102 /* See if we can use a 16-bit instruction. */
11103 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11104 && inst
.size_req
!= 4
11105 && !(inst
.operands
[1].imm
& ~0xff))
11107 mask
= 1 << inst
.operands
[0].reg
;
11109 if (inst
.operands
[0].reg
<= 7)
11111 if (inst
.instruction
== T_MNEM_stmia
11112 ? inst
.operands
[0].writeback
11113 : (inst
.operands
[0].writeback
11114 == !(inst
.operands
[1].imm
& mask
)))
11116 if (inst
.instruction
== T_MNEM_stmia
11117 && (inst
.operands
[1].imm
& mask
)
11118 && (inst
.operands
[1].imm
& (mask
- 1)))
11119 as_warn (_("value stored for r%d is UNKNOWN"),
11120 inst
.operands
[0].reg
);
11122 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11123 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11124 inst
.instruction
|= inst
.operands
[1].imm
;
11127 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11129 /* This means 1 register in reg list one of 3 situations:
11130 1. Instruction is stmia, but without writeback.
11131 2. lmdia without writeback, but with Rn not in
11133 3. ldmia with writeback, but with Rn in reglist.
11134 Case 3 is UNPREDICTABLE behaviour, so we handle
11135 case 1 and 2 which can be converted into a 16-bit
11136 str or ldr. The SP cases are handled below. */
11137 unsigned long opcode
;
11138 /* First, record an error for Case 3. */
11139 if (inst
.operands
[1].imm
& mask
11140 && inst
.operands
[0].writeback
)
11142 _("having the base register in the register list when "
11143 "using write back is UNPREDICTABLE");
11145 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11147 inst
.instruction
= THUMB_OP16 (opcode
);
11148 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11149 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11153 else if (inst
.operands
[0] .reg
== REG_SP
)
11155 if (inst
.operands
[0].writeback
)
11158 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11159 ? T_MNEM_push
: T_MNEM_pop
);
11160 inst
.instruction
|= inst
.operands
[1].imm
;
11163 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11166 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11167 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11168 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11176 if (inst
.instruction
< 0xffff)
11177 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11179 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
11180 inst
.operands
[0].writeback
);
11185 constraint (inst
.operands
[0].reg
> 7
11186 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11187 constraint (inst
.instruction
!= T_MNEM_ldmia
11188 && inst
.instruction
!= T_MNEM_stmia
,
11189 _("Thumb-2 instruction only valid in unified syntax"));
11190 if (inst
.instruction
== T_MNEM_stmia
)
11192 if (!inst
.operands
[0].writeback
)
11193 as_warn (_("this instruction will write back the base register"));
11194 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11195 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11196 as_warn (_("value stored for r%d is UNKNOWN"),
11197 inst
.operands
[0].reg
);
11201 if (!inst
.operands
[0].writeback
11202 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11203 as_warn (_("this instruction will write back the base register"));
11204 else if (inst
.operands
[0].writeback
11205 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11206 as_warn (_("this instruction will not write back the base register"));
11209 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11210 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11211 inst
.instruction
|= inst
.operands
[1].imm
;
11218 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11219 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11220 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11221 || inst
.operands
[1].negative
,
11224 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11226 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11227 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11228 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11234 if (!inst
.operands
[1].present
)
11236 constraint (inst
.operands
[0].reg
== REG_LR
,
11237 _("r14 not allowed as first register "
11238 "when second register is omitted"));
11239 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11241 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11244 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11245 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11246 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11252 unsigned long opcode
;
11255 if (inst
.operands
[0].isreg
11256 && !inst
.operands
[0].preind
11257 && inst
.operands
[0].reg
== REG_PC
)
11258 set_it_insn_type_last ();
11260 opcode
= inst
.instruction
;
11261 if (unified_syntax
)
11263 if (!inst
.operands
[1].isreg
)
11265 if (opcode
<= 0xffff)
11266 inst
.instruction
= THUMB_OP32 (opcode
);
11267 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11270 if (inst
.operands
[1].isreg
11271 && !inst
.operands
[1].writeback
11272 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11273 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11274 && opcode
<= 0xffff
11275 && inst
.size_req
!= 4)
11277 /* Insn may have a 16-bit form. */
11278 Rn
= inst
.operands
[1].reg
;
11279 if (inst
.operands
[1].immisreg
)
11281 inst
.instruction
= THUMB_OP16 (opcode
);
11283 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11285 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11286 reject_bad_reg (inst
.operands
[1].imm
);
11288 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11289 && opcode
!= T_MNEM_ldrsb
)
11290 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11291 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11298 if (inst
.reloc
.pc_rel
)
11299 opcode
= T_MNEM_ldr_pc2
;
11301 opcode
= T_MNEM_ldr_pc
;
11305 if (opcode
== T_MNEM_ldr
)
11306 opcode
= T_MNEM_ldr_sp
;
11308 opcode
= T_MNEM_str_sp
;
11310 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11314 inst
.instruction
= inst
.operands
[0].reg
;
11315 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11317 inst
.instruction
|= THUMB_OP16 (opcode
);
11318 if (inst
.size_req
== 2)
11319 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11321 inst
.relax
= opcode
;
11325 /* Definitely a 32-bit variant. */
11327 /* Warning for Erratum 752419. */
11328 if (opcode
== T_MNEM_ldr
11329 && inst
.operands
[0].reg
== REG_SP
11330 && inst
.operands
[1].writeback
== 1
11331 && !inst
.operands
[1].immisreg
)
11333 if (no_cpu_selected ()
11334 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11335 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11336 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11337 as_warn (_("This instruction may be unpredictable "
11338 "if executed on M-profile cores "
11339 "with interrupts enabled."));
11342 /* Do some validations regarding addressing modes. */
11343 if (inst
.operands
[1].immisreg
)
11344 reject_bad_reg (inst
.operands
[1].imm
);
11346 constraint (inst
.operands
[1].writeback
== 1
11347 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11350 inst
.instruction
= THUMB_OP32 (opcode
);
11351 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11352 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11353 check_ldr_r15_aligned ();
11357 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11359 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11361 /* Only [Rn,Rm] is acceptable. */
11362 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11363 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11364 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11365 || inst
.operands
[1].negative
,
11366 _("Thumb does not support this addressing mode"));
11367 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11371 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11372 if (!inst
.operands
[1].isreg
)
11373 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11376 constraint (!inst
.operands
[1].preind
11377 || inst
.operands
[1].shifted
11378 || inst
.operands
[1].writeback
,
11379 _("Thumb does not support this addressing mode"));
11380 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
11382 constraint (inst
.instruction
& 0x0600,
11383 _("byte or halfword not valid for base register"));
11384 constraint (inst
.operands
[1].reg
== REG_PC
11385 && !(inst
.instruction
& THUMB_LOAD_BIT
),
11386 _("r15 based store not allowed"));
11387 constraint (inst
.operands
[1].immisreg
,
11388 _("invalid base register for register offset"));
11390 if (inst
.operands
[1].reg
== REG_PC
)
11391 inst
.instruction
= T_OPCODE_LDR_PC
;
11392 else if (inst
.instruction
& THUMB_LOAD_BIT
)
11393 inst
.instruction
= T_OPCODE_LDR_SP
;
11395 inst
.instruction
= T_OPCODE_STR_SP
;
11397 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11398 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11402 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
11403 if (!inst
.operands
[1].immisreg
)
11405 /* Immediate offset. */
11406 inst
.instruction
|= inst
.operands
[0].reg
;
11407 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11408 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11412 /* Register offset. */
11413 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
11414 constraint (inst
.operands
[1].negative
,
11415 _("Thumb does not support this addressing mode"));
11418 switch (inst
.instruction
)
11420 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
11421 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
11422 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
11423 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
11424 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
11425 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
11426 case 0x5600 /* ldrsb */:
11427 case 0x5e00 /* ldrsh */: break;
11431 inst
.instruction
|= inst
.operands
[0].reg
;
11432 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11433 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
11439 if (!inst
.operands
[1].present
)
11441 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11442 constraint (inst
.operands
[0].reg
== REG_LR
,
11443 _("r14 not allowed here"));
11444 constraint (inst
.operands
[0].reg
== REG_R12
,
11445 _("r12 not allowed here"));
11448 if (inst
.operands
[2].writeback
11449 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
11450 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
11451 as_warn (_("base register written back, and overlaps "
11452 "one of transfer registers"));
11454 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11455 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11456 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
11462 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11463 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
11469 unsigned Rd
, Rn
, Rm
, Ra
;
11471 Rd
= inst
.operands
[0].reg
;
11472 Rn
= inst
.operands
[1].reg
;
11473 Rm
= inst
.operands
[2].reg
;
11474 Ra
= inst
.operands
[3].reg
;
11476 reject_bad_reg (Rd
);
11477 reject_bad_reg (Rn
);
11478 reject_bad_reg (Rm
);
11479 reject_bad_reg (Ra
);
11481 inst
.instruction
|= Rd
<< 8;
11482 inst
.instruction
|= Rn
<< 16;
11483 inst
.instruction
|= Rm
;
11484 inst
.instruction
|= Ra
<< 12;
11490 unsigned RdLo
, RdHi
, Rn
, Rm
;
11492 RdLo
= inst
.operands
[0].reg
;
11493 RdHi
= inst
.operands
[1].reg
;
11494 Rn
= inst
.operands
[2].reg
;
11495 Rm
= inst
.operands
[3].reg
;
11497 reject_bad_reg (RdLo
);
11498 reject_bad_reg (RdHi
);
11499 reject_bad_reg (Rn
);
11500 reject_bad_reg (Rm
);
11502 inst
.instruction
|= RdLo
<< 12;
11503 inst
.instruction
|= RdHi
<< 8;
11504 inst
.instruction
|= Rn
<< 16;
11505 inst
.instruction
|= Rm
;
11509 do_t_mov_cmp (void)
11513 Rn
= inst
.operands
[0].reg
;
11514 Rm
= inst
.operands
[1].reg
;
11517 set_it_insn_type_last ();
11519 if (unified_syntax
)
11521 int r0off
= (inst
.instruction
== T_MNEM_mov
11522 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
11523 unsigned long opcode
;
11524 bfd_boolean narrow
;
11525 bfd_boolean low_regs
;
11527 low_regs
= (Rn
<= 7 && Rm
<= 7);
11528 opcode
= inst
.instruction
;
11529 if (in_it_block ())
11530 narrow
= opcode
!= T_MNEM_movs
;
11532 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
11533 if (inst
.size_req
== 4
11534 || inst
.operands
[1].shifted
)
11537 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11538 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
11539 && !inst
.operands
[1].shifted
11543 inst
.instruction
= T2_SUBS_PC_LR
;
11547 if (opcode
== T_MNEM_cmp
)
11549 constraint (Rn
== REG_PC
, BAD_PC
);
11552 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11554 warn_deprecated_sp (Rm
);
11555 /* R15 was documented as a valid choice for Rm in ARMv6,
11556 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11557 tools reject R15, so we do too. */
11558 constraint (Rm
== REG_PC
, BAD_PC
);
11561 reject_bad_reg (Rm
);
11563 else if (opcode
== T_MNEM_mov
11564 || opcode
== T_MNEM_movs
)
11566 if (inst
.operands
[1].isreg
)
11568 if (opcode
== T_MNEM_movs
)
11570 reject_bad_reg (Rn
);
11571 reject_bad_reg (Rm
);
11575 /* This is mov.n. */
11576 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
11577 && (Rm
== REG_SP
|| Rm
== REG_PC
))
11579 as_tsktsk (_("Use of r%u as a source register is "
11580 "deprecated when r%u is the destination "
11581 "register."), Rm
, Rn
);
11586 /* This is mov.w. */
11587 constraint (Rn
== REG_PC
, BAD_PC
);
11588 constraint (Rm
== REG_PC
, BAD_PC
);
11589 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
11593 reject_bad_reg (Rn
);
11596 if (!inst
.operands
[1].isreg
)
11598 /* Immediate operand. */
11599 if (!in_it_block () && opcode
== T_MNEM_mov
)
11601 if (low_regs
&& narrow
)
11603 inst
.instruction
= THUMB_OP16 (opcode
);
11604 inst
.instruction
|= Rn
<< 8;
11605 if (inst
.size_req
== 2)
11606 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11608 inst
.relax
= opcode
;
11612 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11613 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11614 inst
.instruction
|= Rn
<< r0off
;
11615 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11618 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
11619 && (inst
.instruction
== T_MNEM_mov
11620 || inst
.instruction
== T_MNEM_movs
))
11622 /* Register shifts are encoded as separate shift instructions. */
11623 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
11625 if (in_it_block ())
11630 if (inst
.size_req
== 4)
11633 if (!low_regs
|| inst
.operands
[1].imm
> 7)
11639 switch (inst
.operands
[1].shift_kind
)
11642 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
11645 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
11648 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
11651 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
11657 inst
.instruction
= opcode
;
11660 inst
.instruction
|= Rn
;
11661 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
11666 inst
.instruction
|= CONDS_BIT
;
11668 inst
.instruction
|= Rn
<< 8;
11669 inst
.instruction
|= Rm
<< 16;
11670 inst
.instruction
|= inst
.operands
[1].imm
;
11675 /* Some mov with immediate shift have narrow variants.
11676 Register shifts are handled above. */
11677 if (low_regs
&& inst
.operands
[1].shifted
11678 && (inst
.instruction
== T_MNEM_mov
11679 || inst
.instruction
== T_MNEM_movs
))
11681 if (in_it_block ())
11682 narrow
= (inst
.instruction
== T_MNEM_mov
);
11684 narrow
= (inst
.instruction
== T_MNEM_movs
);
11689 switch (inst
.operands
[1].shift_kind
)
11691 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11692 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11693 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11694 default: narrow
= FALSE
; break;
11700 inst
.instruction
|= Rn
;
11701 inst
.instruction
|= Rm
<< 3;
11702 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11706 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11707 inst
.instruction
|= Rn
<< r0off
;
11708 encode_thumb32_shifted_operand (1);
11712 switch (inst
.instruction
)
11715 /* In v4t or v5t a move of two lowregs produces unpredictable
11716 results. Don't allow this. */
11719 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
11720 "MOV Rd, Rs with two low registers is not "
11721 "permitted on this architecture");
11722 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
11726 inst
.instruction
= T_OPCODE_MOV_HR
;
11727 inst
.instruction
|= (Rn
& 0x8) << 4;
11728 inst
.instruction
|= (Rn
& 0x7);
11729 inst
.instruction
|= Rm
<< 3;
11733 /* We know we have low registers at this point.
11734 Generate LSLS Rd, Rs, #0. */
11735 inst
.instruction
= T_OPCODE_LSL_I
;
11736 inst
.instruction
|= Rn
;
11737 inst
.instruction
|= Rm
<< 3;
11743 inst
.instruction
= T_OPCODE_CMP_LR
;
11744 inst
.instruction
|= Rn
;
11745 inst
.instruction
|= Rm
<< 3;
11749 inst
.instruction
= T_OPCODE_CMP_HR
;
11750 inst
.instruction
|= (Rn
& 0x8) << 4;
11751 inst
.instruction
|= (Rn
& 0x7);
11752 inst
.instruction
|= Rm
<< 3;
11759 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11761 /* PR 10443: Do not silently ignore shifted operands. */
11762 constraint (inst
.operands
[1].shifted
,
11763 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11765 if (inst
.operands
[1].isreg
)
11767 if (Rn
< 8 && Rm
< 8)
11769 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11770 since a MOV instruction produces unpredictable results. */
11771 if (inst
.instruction
== T_OPCODE_MOV_I8
)
11772 inst
.instruction
= T_OPCODE_ADD_I3
;
11774 inst
.instruction
= T_OPCODE_CMP_LR
;
11776 inst
.instruction
|= Rn
;
11777 inst
.instruction
|= Rm
<< 3;
11781 if (inst
.instruction
== T_OPCODE_MOV_I8
)
11782 inst
.instruction
= T_OPCODE_MOV_HR
;
11784 inst
.instruction
= T_OPCODE_CMP_HR
;
11790 constraint (Rn
> 7,
11791 _("only lo regs allowed with immediate"));
11792 inst
.instruction
|= Rn
<< 8;
11793 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11804 top
= (inst
.instruction
& 0x00800000) != 0;
11805 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
11807 constraint (top
, _(":lower16: not allowed this instruction"));
11808 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
11810 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
11812 constraint (!top
, _(":upper16: not allowed this instruction"));
11813 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
11816 Rd
= inst
.operands
[0].reg
;
11817 reject_bad_reg (Rd
);
11819 inst
.instruction
|= Rd
<< 8;
11820 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
11822 imm
= inst
.reloc
.exp
.X_add_number
;
11823 inst
.instruction
|= (imm
& 0xf000) << 4;
11824 inst
.instruction
|= (imm
& 0x0800) << 15;
11825 inst
.instruction
|= (imm
& 0x0700) << 4;
11826 inst
.instruction
|= (imm
& 0x00ff);
11831 do_t_mvn_tst (void)
11835 Rn
= inst
.operands
[0].reg
;
11836 Rm
= inst
.operands
[1].reg
;
11838 if (inst
.instruction
== T_MNEM_cmp
11839 || inst
.instruction
== T_MNEM_cmn
)
11840 constraint (Rn
== REG_PC
, BAD_PC
);
11842 reject_bad_reg (Rn
);
11843 reject_bad_reg (Rm
);
11845 if (unified_syntax
)
11847 int r0off
= (inst
.instruction
== T_MNEM_mvn
11848 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
11849 bfd_boolean narrow
;
11851 if (inst
.size_req
== 4
11852 || inst
.instruction
> 0xffff
11853 || inst
.operands
[1].shifted
11854 || Rn
> 7 || Rm
> 7)
11856 else if (inst
.instruction
== T_MNEM_cmn
11857 || inst
.instruction
== T_MNEM_tst
)
11859 else if (THUMB_SETS_FLAGS (inst
.instruction
))
11860 narrow
= !in_it_block ();
11862 narrow
= in_it_block ();
11864 if (!inst
.operands
[1].isreg
)
11866 /* For an immediate, we always generate a 32-bit opcode;
11867 section relaxation will shrink it later if possible. */
11868 if (inst
.instruction
< 0xffff)
11869 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11870 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11871 inst
.instruction
|= Rn
<< r0off
;
11872 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11876 /* See if we can do this with a 16-bit instruction. */
11879 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11880 inst
.instruction
|= Rn
;
11881 inst
.instruction
|= Rm
<< 3;
11885 constraint (inst
.operands
[1].shifted
11886 && inst
.operands
[1].immisreg
,
11887 _("shift must be constant"));
11888 if (inst
.instruction
< 0xffff)
11889 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11890 inst
.instruction
|= Rn
<< r0off
;
11891 encode_thumb32_shifted_operand (1);
11897 constraint (inst
.instruction
> 0xffff
11898 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
11899 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
11900 _("unshifted register required"));
11901 constraint (Rn
> 7 || Rm
> 7,
11904 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11905 inst
.instruction
|= Rn
;
11906 inst
.instruction
|= Rm
<< 3;
11915 if (do_vfp_nsyn_mrs () == SUCCESS
)
11918 Rd
= inst
.operands
[0].reg
;
11919 reject_bad_reg (Rd
);
11920 inst
.instruction
|= Rd
<< 8;
11922 if (inst
.operands
[1].isreg
)
11924 unsigned br
= inst
.operands
[1].reg
;
11925 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
11926 as_bad (_("bad register for mrs"));
11928 inst
.instruction
|= br
& (0xf << 16);
11929 inst
.instruction
|= (br
& 0x300) >> 4;
11930 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
11934 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
11936 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
11938 /* PR gas/12698: The constraint is only applied for m_profile.
11939 If the user has specified -march=all, we want to ignore it as
11940 we are building for any CPU type, including non-m variants. */
11941 bfd_boolean m_profile
=
11942 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
11943 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
11944 "not support requested special purpose register"));
11947 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
11949 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
11950 _("'APSR', 'CPSR' or 'SPSR' expected"));
11952 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
11953 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
11954 inst
.instruction
|= 0xf0000;
11964 if (do_vfp_nsyn_msr () == SUCCESS
)
11967 constraint (!inst
.operands
[1].isreg
,
11968 _("Thumb encoding does not support an immediate here"));
11970 if (inst
.operands
[0].isreg
)
11971 flags
= (int)(inst
.operands
[0].reg
);
11973 flags
= inst
.operands
[0].imm
;
11975 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
11977 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
11979 /* PR gas/12698: The constraint is only applied for m_profile.
11980 If the user has specified -march=all, we want to ignore it as
11981 we are building for any CPU type, including non-m variants. */
11982 bfd_boolean m_profile
=
11983 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
11984 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
11985 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
11986 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
11987 && bits
!= PSR_f
)) && m_profile
,
11988 _("selected processor does not support requested special "
11989 "purpose register"));
11992 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
11993 "requested special purpose register"));
11995 Rn
= inst
.operands
[1].reg
;
11996 reject_bad_reg (Rn
);
11998 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
11999 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12000 inst
.instruction
|= (flags
& 0x300) >> 4;
12001 inst
.instruction
|= (flags
& 0xff);
12002 inst
.instruction
|= Rn
<< 16;
12008 bfd_boolean narrow
;
12009 unsigned Rd
, Rn
, Rm
;
12011 if (!inst
.operands
[2].present
)
12012 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12014 Rd
= inst
.operands
[0].reg
;
12015 Rn
= inst
.operands
[1].reg
;
12016 Rm
= inst
.operands
[2].reg
;
12018 if (unified_syntax
)
12020 if (inst
.size_req
== 4
12026 else if (inst
.instruction
== T_MNEM_muls
)
12027 narrow
= !in_it_block ();
12029 narrow
= in_it_block ();
12033 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12034 constraint (Rn
> 7 || Rm
> 7,
12041 /* 16-bit MULS/Conditional MUL. */
12042 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12043 inst
.instruction
|= Rd
;
12046 inst
.instruction
|= Rm
<< 3;
12048 inst
.instruction
|= Rn
<< 3;
12050 constraint (1, _("dest must overlap one source register"));
12054 constraint (inst
.instruction
!= T_MNEM_mul
,
12055 _("Thumb-2 MUL must not set flags"));
12057 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12058 inst
.instruction
|= Rd
<< 8;
12059 inst
.instruction
|= Rn
<< 16;
12060 inst
.instruction
|= Rm
<< 0;
12062 reject_bad_reg (Rd
);
12063 reject_bad_reg (Rn
);
12064 reject_bad_reg (Rm
);
12071 unsigned RdLo
, RdHi
, Rn
, Rm
;
12073 RdLo
= inst
.operands
[0].reg
;
12074 RdHi
= inst
.operands
[1].reg
;
12075 Rn
= inst
.operands
[2].reg
;
12076 Rm
= inst
.operands
[3].reg
;
12078 reject_bad_reg (RdLo
);
12079 reject_bad_reg (RdHi
);
12080 reject_bad_reg (Rn
);
12081 reject_bad_reg (Rm
);
12083 inst
.instruction
|= RdLo
<< 12;
12084 inst
.instruction
|= RdHi
<< 8;
12085 inst
.instruction
|= Rn
<< 16;
12086 inst
.instruction
|= Rm
;
12089 as_tsktsk (_("rdhi and rdlo must be different"));
12095 set_it_insn_type (NEUTRAL_IT_INSN
);
12097 if (unified_syntax
)
12099 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12101 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12102 inst
.instruction
|= inst
.operands
[0].imm
;
12106 /* PR9722: Check for Thumb2 availability before
12107 generating a thumb2 nop instruction. */
12108 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12110 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12111 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12114 inst
.instruction
= 0x46c0;
12119 constraint (inst
.operands
[0].present
,
12120 _("Thumb does not support NOP with hints"));
12121 inst
.instruction
= 0x46c0;
12128 if (unified_syntax
)
12130 bfd_boolean narrow
;
12132 if (THUMB_SETS_FLAGS (inst
.instruction
))
12133 narrow
= !in_it_block ();
12135 narrow
= in_it_block ();
12136 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12138 if (inst
.size_req
== 4)
12143 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12144 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12145 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12149 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12150 inst
.instruction
|= inst
.operands
[0].reg
;
12151 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12156 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12158 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12160 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12161 inst
.instruction
|= inst
.operands
[0].reg
;
12162 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12171 Rd
= inst
.operands
[0].reg
;
12172 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12174 reject_bad_reg (Rd
);
12175 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12176 reject_bad_reg (Rn
);
12178 inst
.instruction
|= Rd
<< 8;
12179 inst
.instruction
|= Rn
<< 16;
12181 if (!inst
.operands
[2].isreg
)
12183 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12184 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12190 Rm
= inst
.operands
[2].reg
;
12191 reject_bad_reg (Rm
);
12193 constraint (inst
.operands
[2].shifted
12194 && inst
.operands
[2].immisreg
,
12195 _("shift must be constant"));
12196 encode_thumb32_shifted_operand (2);
12203 unsigned Rd
, Rn
, Rm
;
12205 Rd
= inst
.operands
[0].reg
;
12206 Rn
= inst
.operands
[1].reg
;
12207 Rm
= inst
.operands
[2].reg
;
12209 reject_bad_reg (Rd
);
12210 reject_bad_reg (Rn
);
12211 reject_bad_reg (Rm
);
12213 inst
.instruction
|= Rd
<< 8;
12214 inst
.instruction
|= Rn
<< 16;
12215 inst
.instruction
|= Rm
;
12216 if (inst
.operands
[3].present
)
12218 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
12219 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12220 _("expression too complex"));
12221 inst
.instruction
|= (val
& 0x1c) << 10;
12222 inst
.instruction
|= (val
& 0x03) << 6;
12229 if (!inst
.operands
[3].present
)
12233 inst
.instruction
&= ~0x00000020;
12235 /* PR 10168. Swap the Rm and Rn registers. */
12236 Rtmp
= inst
.operands
[1].reg
;
12237 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12238 inst
.operands
[2].reg
= Rtmp
;
12246 if (inst
.operands
[0].immisreg
)
12247 reject_bad_reg (inst
.operands
[0].imm
);
12249 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12253 do_t_push_pop (void)
12257 constraint (inst
.operands
[0].writeback
,
12258 _("push/pop do not support {reglist}^"));
12259 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
12260 _("expression too complex"));
12262 mask
= inst
.operands
[0].imm
;
12263 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12264 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12265 else if (inst
.size_req
!= 4
12266 && (mask
& ~0xff) == (1 << (inst
.instruction
== T_MNEM_push
12267 ? REG_LR
: REG_PC
)))
12269 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12270 inst
.instruction
|= THUMB_PP_PC_LR
;
12271 inst
.instruction
|= mask
& 0xff;
12273 else if (unified_syntax
)
12275 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12276 encode_thumb2_ldmstm (13, mask
, TRUE
);
12280 inst
.error
= _("invalid register list to push/pop instruction");
12290 Rd
= inst
.operands
[0].reg
;
12291 Rm
= inst
.operands
[1].reg
;
12293 reject_bad_reg (Rd
);
12294 reject_bad_reg (Rm
);
12296 inst
.instruction
|= Rd
<< 8;
12297 inst
.instruction
|= Rm
<< 16;
12298 inst
.instruction
|= Rm
;
12306 Rd
= inst
.operands
[0].reg
;
12307 Rm
= inst
.operands
[1].reg
;
12309 reject_bad_reg (Rd
);
12310 reject_bad_reg (Rm
);
12312 if (Rd
<= 7 && Rm
<= 7
12313 && inst
.size_req
!= 4)
12315 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12316 inst
.instruction
|= Rd
;
12317 inst
.instruction
|= Rm
<< 3;
12319 else if (unified_syntax
)
12321 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12322 inst
.instruction
|= Rd
<< 8;
12323 inst
.instruction
|= Rm
<< 16;
12324 inst
.instruction
|= Rm
;
12327 inst
.error
= BAD_HIREG
;
12335 Rd
= inst
.operands
[0].reg
;
12336 Rm
= inst
.operands
[1].reg
;
12338 reject_bad_reg (Rd
);
12339 reject_bad_reg (Rm
);
12341 inst
.instruction
|= Rd
<< 8;
12342 inst
.instruction
|= Rm
;
12350 Rd
= inst
.operands
[0].reg
;
12351 Rs
= (inst
.operands
[1].present
12352 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12353 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12355 reject_bad_reg (Rd
);
12356 reject_bad_reg (Rs
);
12357 if (inst
.operands
[2].isreg
)
12358 reject_bad_reg (inst
.operands
[2].reg
);
12360 inst
.instruction
|= Rd
<< 8;
12361 inst
.instruction
|= Rs
<< 16;
12362 if (!inst
.operands
[2].isreg
)
12364 bfd_boolean narrow
;
12366 if ((inst
.instruction
& 0x00100000) != 0)
12367 narrow
= !in_it_block ();
12369 narrow
= in_it_block ();
12371 if (Rd
> 7 || Rs
> 7)
12374 if (inst
.size_req
== 4 || !unified_syntax
)
12377 if (inst
.reloc
.exp
.X_op
!= O_constant
12378 || inst
.reloc
.exp
.X_add_number
!= 0)
12381 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12382 relaxation, but it doesn't seem worth the hassle. */
12385 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12386 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12387 inst
.instruction
|= Rs
<< 3;
12388 inst
.instruction
|= Rd
;
12392 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12393 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12397 encode_thumb32_shifted_operand (2);
12403 if (warn_on_deprecated
12404 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12405 as_tsktsk (_("setend use is deprecated for ARMv8"));
12407 set_it_insn_type (OUTSIDE_IT_INSN
);
12408 if (inst
.operands
[0].imm
)
12409 inst
.instruction
|= 0x8;
12415 if (!inst
.operands
[1].present
)
12416 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12418 if (unified_syntax
)
12420 bfd_boolean narrow
;
12423 switch (inst
.instruction
)
12426 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12428 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12430 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12432 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12436 if (THUMB_SETS_FLAGS (inst
.instruction
))
12437 narrow
= !in_it_block ();
12439 narrow
= in_it_block ();
12440 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12442 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12444 if (inst
.operands
[2].isreg
12445 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12446 || inst
.operands
[2].reg
> 7))
12448 if (inst
.size_req
== 4)
12451 reject_bad_reg (inst
.operands
[0].reg
);
12452 reject_bad_reg (inst
.operands
[1].reg
);
12456 if (inst
.operands
[2].isreg
)
12458 reject_bad_reg (inst
.operands
[2].reg
);
12459 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12460 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12461 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12462 inst
.instruction
|= inst
.operands
[2].reg
;
12464 /* PR 12854: Error on extraneous shifts. */
12465 constraint (inst
.operands
[2].shifted
,
12466 _("extraneous shift as part of operand to shift insn"));
12470 inst
.operands
[1].shifted
= 1;
12471 inst
.operands
[1].shift_kind
= shift_kind
;
12472 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
12473 ? T_MNEM_movs
: T_MNEM_mov
);
12474 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12475 encode_thumb32_shifted_operand (1);
12476 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12477 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12482 if (inst
.operands
[2].isreg
)
12484 switch (shift_kind
)
12486 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12487 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12488 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12489 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12493 inst
.instruction
|= inst
.operands
[0].reg
;
12494 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12496 /* PR 12854: Error on extraneous shifts. */
12497 constraint (inst
.operands
[2].shifted
,
12498 _("extraneous shift as part of operand to shift insn"));
12502 switch (shift_kind
)
12504 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12505 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12506 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12509 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12510 inst
.instruction
|= inst
.operands
[0].reg
;
12511 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12517 constraint (inst
.operands
[0].reg
> 7
12518 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
12519 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12521 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
12523 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
12524 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12525 _("source1 and dest must be same register"));
12527 switch (inst
.instruction
)
12529 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12530 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12531 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12532 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12536 inst
.instruction
|= inst
.operands
[0].reg
;
12537 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12539 /* PR 12854: Error on extraneous shifts. */
12540 constraint (inst
.operands
[2].shifted
,
12541 _("extraneous shift as part of operand to shift insn"));
12545 switch (inst
.instruction
)
12547 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12548 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12549 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12550 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
12553 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12554 inst
.instruction
|= inst
.operands
[0].reg
;
12555 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12563 unsigned Rd
, Rn
, Rm
;
12565 Rd
= inst
.operands
[0].reg
;
12566 Rn
= inst
.operands
[1].reg
;
12567 Rm
= inst
.operands
[2].reg
;
12569 reject_bad_reg (Rd
);
12570 reject_bad_reg (Rn
);
12571 reject_bad_reg (Rm
);
12573 inst
.instruction
|= Rd
<< 8;
12574 inst
.instruction
|= Rn
<< 16;
12575 inst
.instruction
|= Rm
;
12581 unsigned Rd
, Rn
, Rm
;
12583 Rd
= inst
.operands
[0].reg
;
12584 Rm
= inst
.operands
[1].reg
;
12585 Rn
= inst
.operands
[2].reg
;
12587 reject_bad_reg (Rd
);
12588 reject_bad_reg (Rn
);
12589 reject_bad_reg (Rm
);
12591 inst
.instruction
|= Rd
<< 8;
12592 inst
.instruction
|= Rn
<< 16;
12593 inst
.instruction
|= Rm
;
12599 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12600 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
12601 _("SMC is not permitted on this architecture"));
12602 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12603 _("expression too complex"));
12604 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12605 inst
.instruction
|= (value
& 0xf000) >> 12;
12606 inst
.instruction
|= (value
& 0x0ff0);
12607 inst
.instruction
|= (value
& 0x000f) << 16;
12608 /* PR gas/15623: SMC instructions must be last in an IT block. */
12609 set_it_insn_type_last ();
12615 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12617 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12618 inst
.instruction
|= (value
& 0x0fff);
12619 inst
.instruction
|= (value
& 0xf000) << 4;
12623 do_t_ssat_usat (int bias
)
12627 Rd
= inst
.operands
[0].reg
;
12628 Rn
= inst
.operands
[2].reg
;
12630 reject_bad_reg (Rd
);
12631 reject_bad_reg (Rn
);
12633 inst
.instruction
|= Rd
<< 8;
12634 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
12635 inst
.instruction
|= Rn
<< 16;
12637 if (inst
.operands
[3].present
)
12639 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
12641 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12643 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12644 _("expression too complex"));
12646 if (shift_amount
!= 0)
12648 constraint (shift_amount
> 31,
12649 _("shift expression is too large"));
12651 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
12652 inst
.instruction
|= 0x00200000; /* sh bit. */
12654 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
12655 inst
.instruction
|= (shift_amount
& 0x03) << 6;
12663 do_t_ssat_usat (1);
12671 Rd
= inst
.operands
[0].reg
;
12672 Rn
= inst
.operands
[2].reg
;
12674 reject_bad_reg (Rd
);
12675 reject_bad_reg (Rn
);
12677 inst
.instruction
|= Rd
<< 8;
12678 inst
.instruction
|= inst
.operands
[1].imm
- 1;
12679 inst
.instruction
|= Rn
<< 16;
12685 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
12686 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
12687 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
12688 || inst
.operands
[2].negative
,
12691 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
12693 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12694 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12695 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12696 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
12702 if (!inst
.operands
[2].present
)
12703 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
12705 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
12706 || inst
.operands
[0].reg
== inst
.operands
[2].reg
12707 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
12710 inst
.instruction
|= inst
.operands
[0].reg
;
12711 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12712 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
12713 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
12719 unsigned Rd
, Rn
, Rm
;
12721 Rd
= inst
.operands
[0].reg
;
12722 Rn
= inst
.operands
[1].reg
;
12723 Rm
= inst
.operands
[2].reg
;
12725 reject_bad_reg (Rd
);
12726 reject_bad_reg (Rn
);
12727 reject_bad_reg (Rm
);
12729 inst
.instruction
|= Rd
<< 8;
12730 inst
.instruction
|= Rn
<< 16;
12731 inst
.instruction
|= Rm
;
12732 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
12740 Rd
= inst
.operands
[0].reg
;
12741 Rm
= inst
.operands
[1].reg
;
12743 reject_bad_reg (Rd
);
12744 reject_bad_reg (Rm
);
12746 if (inst
.instruction
<= 0xffff
12747 && inst
.size_req
!= 4
12748 && Rd
<= 7 && Rm
<= 7
12749 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
12751 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12752 inst
.instruction
|= Rd
;
12753 inst
.instruction
|= Rm
<< 3;
12755 else if (unified_syntax
)
12757 if (inst
.instruction
<= 0xffff)
12758 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12759 inst
.instruction
|= Rd
<< 8;
12760 inst
.instruction
|= Rm
;
12761 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
12765 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
12766 _("Thumb encoding does not support rotation"));
12767 constraint (1, BAD_HIREG
);
12774 /* We have to do the following check manually as ARM_EXT_OS only applies
12776 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6m
))
12778 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_os
)
12779 /* This only applies to the v6m howver, not later architectures. */
12780 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
))
12781 as_bad (_("SVC is not permitted on this architecture"));
12782 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, arm_ext_os
);
12785 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
12794 half
= (inst
.instruction
& 0x10) != 0;
12795 set_it_insn_type_last ();
12796 constraint (inst
.operands
[0].immisreg
,
12797 _("instruction requires register index"));
12799 Rn
= inst
.operands
[0].reg
;
12800 Rm
= inst
.operands
[0].imm
;
12802 constraint (Rn
== REG_SP
, BAD_SP
);
12803 reject_bad_reg (Rm
);
12805 constraint (!half
&& inst
.operands
[0].shifted
,
12806 _("instruction does not allow shifted index"));
12807 inst
.instruction
|= (Rn
<< 16) | Rm
;
12813 if (!inst
.operands
[0].present
)
12814 inst
.operands
[0].imm
= 0;
12816 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
12818 constraint (inst
.size_req
== 2,
12819 _("immediate value out of range"));
12820 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12821 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
12822 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
12826 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12827 inst
.instruction
|= inst
.operands
[0].imm
;
12830 set_it_insn_type (NEUTRAL_IT_INSN
);
12837 do_t_ssat_usat (0);
12845 Rd
= inst
.operands
[0].reg
;
12846 Rn
= inst
.operands
[2].reg
;
12848 reject_bad_reg (Rd
);
12849 reject_bad_reg (Rn
);
12851 inst
.instruction
|= Rd
<< 8;
12852 inst
.instruction
|= inst
.operands
[1].imm
;
12853 inst
.instruction
|= Rn
<< 16;
12856 /* Neon instruction encoder helpers. */
12858 /* Encodings for the different types for various Neon opcodes. */
12860 /* An "invalid" code for the following tables. */
12863 struct neon_tab_entry
12866 unsigned float_or_poly
;
12867 unsigned scalar_or_imm
;
12870 /* Map overloaded Neon opcodes to their respective encodings. */
12871 #define NEON_ENC_TAB \
12872 X(vabd, 0x0000700, 0x1200d00, N_INV), \
12873 X(vmax, 0x0000600, 0x0000f00, N_INV), \
12874 X(vmin, 0x0000610, 0x0200f00, N_INV), \
12875 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
12876 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
12877 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
12878 X(vadd, 0x0000800, 0x0000d00, N_INV), \
12879 X(vsub, 0x1000800, 0x0200d00, N_INV), \
12880 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
12881 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
12882 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
12883 /* Register variants of the following two instructions are encoded as
12884 vcge / vcgt with the operands reversed. */ \
12885 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
12886 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
12887 X(vfma, N_INV, 0x0000c10, N_INV), \
12888 X(vfms, N_INV, 0x0200c10, N_INV), \
12889 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
12890 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
12891 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
12892 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
12893 X(vmlal, 0x0800800, N_INV, 0x0800240), \
12894 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
12895 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
12896 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
12897 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
12898 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
12899 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
12900 X(vshl, 0x0000400, N_INV, 0x0800510), \
12901 X(vqshl, 0x0000410, N_INV, 0x0800710), \
12902 X(vand, 0x0000110, N_INV, 0x0800030), \
12903 X(vbic, 0x0100110, N_INV, 0x0800030), \
12904 X(veor, 0x1000110, N_INV, N_INV), \
12905 X(vorn, 0x0300110, N_INV, 0x0800010), \
12906 X(vorr, 0x0200110, N_INV, 0x0800010), \
12907 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
12908 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
12909 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
12910 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
12911 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
12912 X(vst1, 0x0000000, 0x0800000, N_INV), \
12913 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
12914 X(vst2, 0x0000100, 0x0800100, N_INV), \
12915 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
12916 X(vst3, 0x0000200, 0x0800200, N_INV), \
12917 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
12918 X(vst4, 0x0000300, 0x0800300, N_INV), \
12919 X(vmovn, 0x1b20200, N_INV, N_INV), \
12920 X(vtrn, 0x1b20080, N_INV, N_INV), \
12921 X(vqmovn, 0x1b20200, N_INV, N_INV), \
12922 X(vqmovun, 0x1b20240, N_INV, N_INV), \
12923 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
12924 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
12925 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
12926 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
12927 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
12928 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
12929 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
12930 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
12931 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
12932 X(vseleq, 0xe000a00, N_INV, N_INV), \
12933 X(vselvs, 0xe100a00, N_INV, N_INV), \
12934 X(vselge, 0xe200a00, N_INV, N_INV), \
12935 X(vselgt, 0xe300a00, N_INV, N_INV), \
12936 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
12937 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
12938 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
12939 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
12940 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
12941 X(aes, 0x3b00300, N_INV, N_INV), \
12942 X(sha3op, 0x2000c00, N_INV, N_INV), \
12943 X(sha1h, 0x3b902c0, N_INV, N_INV), \
12944 X(sha2op, 0x3ba0380, N_INV, N_INV)
12948 #define X(OPC,I,F,S) N_MNEM_##OPC
12953 static const struct neon_tab_entry neon_enc_tab
[] =
12955 #define X(OPC,I,F,S) { (I), (F), (S) }
12960 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
12961 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12962 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12963 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12964 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12965 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12966 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12967 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12968 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12969 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12970 #define NEON_ENC_SINGLE_(X) \
12971 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
12972 #define NEON_ENC_DOUBLE_(X) \
12973 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
12974 #define NEON_ENC_FPV8_(X) \
12975 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
12977 #define NEON_ENCODE(type, inst) \
12980 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
12981 inst.is_neon = 1; \
12985 #define check_neon_suffixes \
12988 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
12990 as_bad (_("invalid neon suffix for non neon instruction")); \
12996 /* Define shapes for instruction operands. The following mnemonic characters
12997 are used in this table:
12999 F - VFP S<n> register
13000 D - Neon D<n> register
13001 Q - Neon Q<n> register
13005 L - D<n> register list
13007 This table is used to generate various data:
13008 - enumerations of the form NS_DDR to be used as arguments to
13010 - a table classifying shapes into single, double, quad, mixed.
13011 - a table used to drive neon_select_shape. */
13013 #define NEON_SHAPE_DEF \
13014 X(3, (D, D, D), DOUBLE), \
13015 X(3, (Q, Q, Q), QUAD), \
13016 X(3, (D, D, I), DOUBLE), \
13017 X(3, (Q, Q, I), QUAD), \
13018 X(3, (D, D, S), DOUBLE), \
13019 X(3, (Q, Q, S), QUAD), \
13020 X(2, (D, D), DOUBLE), \
13021 X(2, (Q, Q), QUAD), \
13022 X(2, (D, S), DOUBLE), \
13023 X(2, (Q, S), QUAD), \
13024 X(2, (D, R), DOUBLE), \
13025 X(2, (Q, R), QUAD), \
13026 X(2, (D, I), DOUBLE), \
13027 X(2, (Q, I), QUAD), \
13028 X(3, (D, L, D), DOUBLE), \
13029 X(2, (D, Q), MIXED), \
13030 X(2, (Q, D), MIXED), \
13031 X(3, (D, Q, I), MIXED), \
13032 X(3, (Q, D, I), MIXED), \
13033 X(3, (Q, D, D), MIXED), \
13034 X(3, (D, Q, Q), MIXED), \
13035 X(3, (Q, Q, D), MIXED), \
13036 X(3, (Q, D, S), MIXED), \
13037 X(3, (D, Q, S), MIXED), \
13038 X(4, (D, D, D, I), DOUBLE), \
13039 X(4, (Q, Q, Q, I), QUAD), \
13040 X(2, (F, F), SINGLE), \
13041 X(3, (F, F, F), SINGLE), \
13042 X(2, (F, I), SINGLE), \
13043 X(2, (F, D), MIXED), \
13044 X(2, (D, F), MIXED), \
13045 X(3, (F, F, I), MIXED), \
13046 X(4, (R, R, F, F), SINGLE), \
13047 X(4, (F, F, R, R), SINGLE), \
13048 X(3, (D, R, R), DOUBLE), \
13049 X(3, (R, R, D), DOUBLE), \
13050 X(2, (S, R), SINGLE), \
13051 X(2, (R, S), SINGLE), \
13052 X(2, (F, R), SINGLE), \
13053 X(2, (R, F), SINGLE)
13055 #define S2(A,B) NS_##A##B
13056 #define S3(A,B,C) NS_##A##B##C
13057 #define S4(A,B,C,D) NS_##A##B##C##D
13059 #define X(N, L, C) S##N L
13072 enum neon_shape_class
13080 #define X(N, L, C) SC_##C
13082 static enum neon_shape_class neon_shape_class
[] =
13100 /* Register widths of above. */
13101 static unsigned neon_shape_el_size
[] =
13112 struct neon_shape_info
13115 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13118 #define S2(A,B) { SE_##A, SE_##B }
13119 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13120 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13122 #define X(N, L, C) { N, S##N L }
13124 static struct neon_shape_info neon_shape_tab
[] =
13134 /* Bit masks used in type checking given instructions.
13135 'N_EQK' means the type must be the same as (or based on in some way) the key
13136 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13137 set, various other bits can be set as well in order to modify the meaning of
13138 the type constraint. */
13140 enum neon_type_mask
13164 N_KEY
= 0x1000000, /* Key element (main type specifier). */
13165 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
13166 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
13167 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
13168 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
13169 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
13170 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13171 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13172 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13173 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
13174 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13176 N_MAX_NONSPECIAL
= N_P64
13179 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13181 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13182 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13183 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13184 #define N_SUF_32 (N_SU_32 | N_F32)
13185 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13186 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
13188 /* Pass this as the first type argument to neon_check_type to ignore types
13190 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13192 /* Select a "shape" for the current instruction (describing register types or
13193 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13194 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13195 function of operand parsing, so this function doesn't need to be called.
13196 Shapes should be listed in order of decreasing length. */
13198 static enum neon_shape
13199 neon_select_shape (enum neon_shape shape
, ...)
13202 enum neon_shape first_shape
= shape
;
13204 /* Fix missing optional operands. FIXME: we don't know at this point how
13205 many arguments we should have, so this makes the assumption that we have
13206 > 1. This is true of all current Neon opcodes, I think, but may not be
13207 true in the future. */
13208 if (!inst
.operands
[1].present
)
13209 inst
.operands
[1] = inst
.operands
[0];
13211 va_start (ap
, shape
);
13213 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13218 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13220 if (!inst
.operands
[j
].present
)
13226 switch (neon_shape_tab
[shape
].el
[j
])
13229 if (!(inst
.operands
[j
].isreg
13230 && inst
.operands
[j
].isvec
13231 && inst
.operands
[j
].issingle
13232 && !inst
.operands
[j
].isquad
))
13237 if (!(inst
.operands
[j
].isreg
13238 && inst
.operands
[j
].isvec
13239 && !inst
.operands
[j
].isquad
13240 && !inst
.operands
[j
].issingle
))
13245 if (!(inst
.operands
[j
].isreg
13246 && !inst
.operands
[j
].isvec
))
13251 if (!(inst
.operands
[j
].isreg
13252 && inst
.operands
[j
].isvec
13253 && inst
.operands
[j
].isquad
13254 && !inst
.operands
[j
].issingle
))
13259 if (!(!inst
.operands
[j
].isreg
13260 && !inst
.operands
[j
].isscalar
))
13265 if (!(!inst
.operands
[j
].isreg
13266 && inst
.operands
[j
].isscalar
))
13276 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
13277 /* We've matched all the entries in the shape table, and we don't
13278 have any left over operands which have not been matched. */
13284 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
13285 first_error (_("invalid instruction shape"));
13290 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13291 means the Q bit should be set). */
13294 neon_quad (enum neon_shape shape
)
13296 return neon_shape_class
[shape
] == SC_QUAD
;
13300 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
13303 /* Allow modification to be made to types which are constrained to be
13304 based on the key element, based on bits set alongside N_EQK. */
13305 if ((typebits
& N_EQK
) != 0)
13307 if ((typebits
& N_HLF
) != 0)
13309 else if ((typebits
& N_DBL
) != 0)
13311 if ((typebits
& N_SGN
) != 0)
13312 *g_type
= NT_signed
;
13313 else if ((typebits
& N_UNS
) != 0)
13314 *g_type
= NT_unsigned
;
13315 else if ((typebits
& N_INT
) != 0)
13316 *g_type
= NT_integer
;
13317 else if ((typebits
& N_FLT
) != 0)
13318 *g_type
= NT_float
;
13319 else if ((typebits
& N_SIZ
) != 0)
13320 *g_type
= NT_untyped
;
13324 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13325 operand type, i.e. the single type specified in a Neon instruction when it
13326 is the only one given. */
13328 static struct neon_type_el
13329 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
13331 struct neon_type_el dest
= *key
;
13333 gas_assert ((thisarg
& N_EQK
) != 0);
13335 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
13340 /* Convert Neon type and size into compact bitmask representation. */
13342 static enum neon_type_mask
13343 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
13350 case 8: return N_8
;
13351 case 16: return N_16
;
13352 case 32: return N_32
;
13353 case 64: return N_64
;
13361 case 8: return N_I8
;
13362 case 16: return N_I16
;
13363 case 32: return N_I32
;
13364 case 64: return N_I64
;
13372 case 16: return N_F16
;
13373 case 32: return N_F32
;
13374 case 64: return N_F64
;
13382 case 8: return N_P8
;
13383 case 16: return N_P16
;
13384 case 64: return N_P64
;
13392 case 8: return N_S8
;
13393 case 16: return N_S16
;
13394 case 32: return N_S32
;
13395 case 64: return N_S64
;
13403 case 8: return N_U8
;
13404 case 16: return N_U16
;
13405 case 32: return N_U32
;
13406 case 64: return N_U64
;
13417 /* Convert compact Neon bitmask type representation to a type and size. Only
13418 handles the case where a single bit is set in the mask. */
13421 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
13422 enum neon_type_mask mask
)
13424 if ((mask
& N_EQK
) != 0)
13427 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
13429 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
13431 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
13433 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
13438 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
13440 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
13441 *type
= NT_unsigned
;
13442 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
13443 *type
= NT_integer
;
13444 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
13445 *type
= NT_untyped
;
13446 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
13448 else if ((mask
& (N_F16
| N_F32
| N_F64
)) != 0)
13456 /* Modify a bitmask of allowed types. This is only needed for type
13460 modify_types_allowed (unsigned allowed
, unsigned mods
)
13463 enum neon_el_type type
;
13469 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
13471 if (el_type_of_type_chk (&type
, &size
,
13472 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
13474 neon_modify_type_size (mods
, &type
, &size
);
13475 destmask
|= type_chk_of_el_type (type
, size
);
13482 /* Check type and return type classification.
13483 The manual states (paraphrase): If one datatype is given, it indicates the
13485 - the second operand, if there is one
13486 - the operand, if there is no second operand
13487 - the result, if there are no operands.
13488 This isn't quite good enough though, so we use a concept of a "key" datatype
13489 which is set on a per-instruction basis, which is the one which matters when
13490 only one data type is written.
13491 Note: this function has side-effects (e.g. filling in missing operands). All
13492 Neon instructions should call it before performing bit encoding. */
13494 static struct neon_type_el
13495 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
13498 unsigned i
, pass
, key_el
= 0;
13499 unsigned types
[NEON_MAX_TYPE_ELS
];
13500 enum neon_el_type k_type
= NT_invtype
;
13501 unsigned k_size
= -1u;
13502 struct neon_type_el badtype
= {NT_invtype
, -1};
13503 unsigned key_allowed
= 0;
13505 /* Optional registers in Neon instructions are always (not) in operand 1.
13506 Fill in the missing operand here, if it was omitted. */
13507 if (els
> 1 && !inst
.operands
[1].present
)
13508 inst
.operands
[1] = inst
.operands
[0];
13510 /* Suck up all the varargs. */
13512 for (i
= 0; i
< els
; i
++)
13514 unsigned thisarg
= va_arg (ap
, unsigned);
13515 if (thisarg
== N_IGNORE_TYPE
)
13520 types
[i
] = thisarg
;
13521 if ((thisarg
& N_KEY
) != 0)
13526 if (inst
.vectype
.elems
> 0)
13527 for (i
= 0; i
< els
; i
++)
13528 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
13530 first_error (_("types specified in both the mnemonic and operands"));
13534 /* Duplicate inst.vectype elements here as necessary.
13535 FIXME: No idea if this is exactly the same as the ARM assembler,
13536 particularly when an insn takes one register and one non-register
13538 if (inst
.vectype
.elems
== 1 && els
> 1)
13541 inst
.vectype
.elems
= els
;
13542 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
13543 for (j
= 0; j
< els
; j
++)
13545 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13548 else if (inst
.vectype
.elems
== 0 && els
> 0)
13551 /* No types were given after the mnemonic, so look for types specified
13552 after each operand. We allow some flexibility here; as long as the
13553 "key" operand has a type, we can infer the others. */
13554 for (j
= 0; j
< els
; j
++)
13555 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
13556 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
13558 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
13560 for (j
= 0; j
< els
; j
++)
13561 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
13562 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13567 first_error (_("operand types can't be inferred"));
13571 else if (inst
.vectype
.elems
!= els
)
13573 first_error (_("type specifier has the wrong number of parts"));
13577 for (pass
= 0; pass
< 2; pass
++)
13579 for (i
= 0; i
< els
; i
++)
13581 unsigned thisarg
= types
[i
];
13582 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
13583 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
13584 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
13585 unsigned g_size
= inst
.vectype
.el
[i
].size
;
13587 /* Decay more-specific signed & unsigned types to sign-insensitive
13588 integer types if sign-specific variants are unavailable. */
13589 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
13590 && (types_allowed
& N_SU_ALL
) == 0)
13591 g_type
= NT_integer
;
13593 /* If only untyped args are allowed, decay any more specific types to
13594 them. Some instructions only care about signs for some element
13595 sizes, so handle that properly. */
13596 if (((types_allowed
& N_UNT
) == 0)
13597 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
13598 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
13599 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
13600 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
13601 g_type
= NT_untyped
;
13605 if ((thisarg
& N_KEY
) != 0)
13609 key_allowed
= thisarg
& ~N_KEY
;
13614 if ((thisarg
& N_VFP
) != 0)
13616 enum neon_shape_el regshape
;
13617 unsigned regwidth
, match
;
13619 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
13622 first_error (_("invalid instruction shape"));
13625 regshape
= neon_shape_tab
[ns
].el
[i
];
13626 regwidth
= neon_shape_el_size
[regshape
];
13628 /* In VFP mode, operands must match register widths. If we
13629 have a key operand, use its width, else use the width of
13630 the current operand. */
13636 if (regwidth
!= match
)
13638 first_error (_("operand size must match register width"));
13643 if ((thisarg
& N_EQK
) == 0)
13645 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
13647 if ((given_type
& types_allowed
) == 0)
13649 first_error (_("bad type in Neon instruction"));
13655 enum neon_el_type mod_k_type
= k_type
;
13656 unsigned mod_k_size
= k_size
;
13657 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
13658 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
13660 first_error (_("inconsistent types in Neon instruction"));
13668 return inst
.vectype
.el
[key_el
];
13671 /* Neon-style VFP instruction forwarding. */
13673 /* Thumb VFP instructions have 0xE in the condition field. */
13676 do_vfp_cond_or_thumb (void)
13681 inst
.instruction
|= 0xe0000000;
13683 inst
.instruction
|= inst
.cond
<< 28;
13686 /* Look up and encode a simple mnemonic, for use as a helper function for the
13687 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13688 etc. It is assumed that operand parsing has already been done, and that the
13689 operands are in the form expected by the given opcode (this isn't necessarily
13690 the same as the form in which they were parsed, hence some massaging must
13691 take place before this function is called).
13692 Checks current arch version against that in the looked-up opcode. */
13695 do_vfp_nsyn_opcode (const char *opname
)
13697 const struct asm_opcode
*opcode
;
13699 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
13704 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
13705 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
13712 inst
.instruction
= opcode
->tvalue
;
13713 opcode
->tencode ();
13717 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
13718 opcode
->aencode ();
13723 do_vfp_nsyn_add_sub (enum neon_shape rs
)
13725 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
13730 do_vfp_nsyn_opcode ("fadds");
13732 do_vfp_nsyn_opcode ("fsubs");
13737 do_vfp_nsyn_opcode ("faddd");
13739 do_vfp_nsyn_opcode ("fsubd");
13743 /* Check operand types to see if this is a VFP instruction, and if so call
13747 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
13749 enum neon_shape rs
;
13750 struct neon_type_el et
;
13755 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
13756 et
= neon_check_type (2, rs
,
13757 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
13761 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
13762 et
= neon_check_type (3, rs
,
13763 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
13770 if (et
.type
!= NT_invtype
)
13781 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
13783 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
13788 do_vfp_nsyn_opcode ("fmacs");
13790 do_vfp_nsyn_opcode ("fnmacs");
13795 do_vfp_nsyn_opcode ("fmacd");
13797 do_vfp_nsyn_opcode ("fnmacd");
13802 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
13804 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
13809 do_vfp_nsyn_opcode ("ffmas");
13811 do_vfp_nsyn_opcode ("ffnmas");
13816 do_vfp_nsyn_opcode ("ffmad");
13818 do_vfp_nsyn_opcode ("ffnmad");
13823 do_vfp_nsyn_mul (enum neon_shape rs
)
13826 do_vfp_nsyn_opcode ("fmuls");
13828 do_vfp_nsyn_opcode ("fmuld");
13832 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
13834 int is_neg
= (inst
.instruction
& 0x80) != 0;
13835 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
13840 do_vfp_nsyn_opcode ("fnegs");
13842 do_vfp_nsyn_opcode ("fabss");
13847 do_vfp_nsyn_opcode ("fnegd");
13849 do_vfp_nsyn_opcode ("fabsd");
13853 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13854 insns belong to Neon, and are handled elsewhere. */
13857 do_vfp_nsyn_ldm_stm (int is_dbmode
)
13859 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
13863 do_vfp_nsyn_opcode ("fldmdbs");
13865 do_vfp_nsyn_opcode ("fldmias");
13870 do_vfp_nsyn_opcode ("fstmdbs");
13872 do_vfp_nsyn_opcode ("fstmias");
13877 do_vfp_nsyn_sqrt (void)
13879 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
13880 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
13883 do_vfp_nsyn_opcode ("fsqrts");
13885 do_vfp_nsyn_opcode ("fsqrtd");
13889 do_vfp_nsyn_div (void)
13891 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
13892 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
13893 N_F32
| N_F64
| N_KEY
| N_VFP
);
13896 do_vfp_nsyn_opcode ("fdivs");
13898 do_vfp_nsyn_opcode ("fdivd");
13902 do_vfp_nsyn_nmul (void)
13904 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
13905 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
13906 N_F32
| N_F64
| N_KEY
| N_VFP
);
13910 NEON_ENCODE (SINGLE
, inst
);
13911 do_vfp_sp_dyadic ();
13915 NEON_ENCODE (DOUBLE
, inst
);
13916 do_vfp_dp_rd_rn_rm ();
13918 do_vfp_cond_or_thumb ();
13922 do_vfp_nsyn_cmp (void)
13924 if (inst
.operands
[1].isreg
)
13926 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
13927 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
13931 NEON_ENCODE (SINGLE
, inst
);
13932 do_vfp_sp_monadic ();
13936 NEON_ENCODE (DOUBLE
, inst
);
13937 do_vfp_dp_rd_rm ();
13942 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
13943 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
13945 switch (inst
.instruction
& 0x0fffffff)
13948 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
13951 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
13959 NEON_ENCODE (SINGLE
, inst
);
13960 do_vfp_sp_compare_z ();
13964 NEON_ENCODE (DOUBLE
, inst
);
13968 do_vfp_cond_or_thumb ();
13972 nsyn_insert_sp (void)
13974 inst
.operands
[1] = inst
.operands
[0];
13975 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
13976 inst
.operands
[0].reg
= REG_SP
;
13977 inst
.operands
[0].isreg
= 1;
13978 inst
.operands
[0].writeback
= 1;
13979 inst
.operands
[0].present
= 1;
13983 do_vfp_nsyn_push (void)
13986 if (inst
.operands
[1].issingle
)
13987 do_vfp_nsyn_opcode ("fstmdbs");
13989 do_vfp_nsyn_opcode ("fstmdbd");
13993 do_vfp_nsyn_pop (void)
13996 if (inst
.operands
[1].issingle
)
13997 do_vfp_nsyn_opcode ("fldmias");
13999 do_vfp_nsyn_opcode ("fldmiad");
14002 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14003 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14006 neon_dp_fixup (struct arm_it
* insn
)
14008 unsigned int i
= insn
->instruction
;
14013 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14024 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3), i.e. log2(size) - 3, via the position of the lowest
   set bit.  */

static int
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
14036 #define LOW4(R) ((R) & 0xf)
14037 #define HI1(R) (((R) >> 4) & 1)
14039 /* Encode insns with bit pattern:
14041 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14042 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14044 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14045 different meaning for some instruction. */
14048 neon_three_same (int isquad
, int ubit
, int size
)
14050 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14051 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14052 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14053 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14054 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14055 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14056 inst
.instruction
|= (isquad
!= 0) << 6;
14057 inst
.instruction
|= (ubit
!= 0) << 24;
14059 inst
.instruction
|= neon_logbits (size
) << 20;
14061 neon_dp_fixup (&inst
);
14064 /* Encode instructions of the form:
14066 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14067 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14069 Don't write size if SIZE == -1. */
14072 neon_two_same (int qbit
, int ubit
, int size
)
14074 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14075 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14076 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14077 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14078 inst
.instruction
|= (qbit
!= 0) << 6;
14079 inst
.instruction
|= (ubit
!= 0) << 24;
14082 inst
.instruction
|= neon_logbits (size
) << 18;
14084 neon_dp_fixup (&inst
);
14087 /* Neon instruction encoders, in approximate order of appearance. */
14090 do_neon_dyadic_i_su (void)
14092 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14093 struct neon_type_el et
= neon_check_type (3, rs
,
14094 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14095 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14099 do_neon_dyadic_i64_su (void)
14101 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14102 struct neon_type_el et
= neon_check_type (3, rs
,
14103 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14104 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14108 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14111 unsigned size
= et
.size
>> 3;
14112 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14113 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14114 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14115 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14116 inst
.instruction
|= (isquad
!= 0) << 6;
14117 inst
.instruction
|= immbits
<< 16;
14118 inst
.instruction
|= (size
>> 3) << 7;
14119 inst
.instruction
|= (size
& 0x7) << 19;
14121 inst
.instruction
|= (uval
!= 0) << 24;
14123 neon_dp_fixup (&inst
);
14127 do_neon_shl_imm (void)
14129 if (!inst
.operands
[2].isreg
)
14131 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14132 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14133 int imm
= inst
.operands
[2].imm
;
14135 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14136 _("immediate out of range for shift"));
14137 NEON_ENCODE (IMMED
, inst
);
14138 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14142 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14143 struct neon_type_el et
= neon_check_type (3, rs
,
14144 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14147 /* VSHL/VQSHL 3-register variants have syntax such as:
14149 whereas other 3-register operations encoded by neon_three_same have
14152 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14154 tmp
= inst
.operands
[2].reg
;
14155 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14156 inst
.operands
[1].reg
= tmp
;
14157 NEON_ENCODE (INTEGER
, inst
);
14158 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14163 do_neon_qshl_imm (void)
14165 if (!inst
.operands
[2].isreg
)
14167 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14168 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14169 int imm
= inst
.operands
[2].imm
;
14171 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14172 _("immediate out of range for shift"));
14173 NEON_ENCODE (IMMED
, inst
);
14174 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
14178 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14179 struct neon_type_el et
= neon_check_type (3, rs
,
14180 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14183 /* See note in do_neon_shl_imm. */
14184 tmp
= inst
.operands
[2].reg
;
14185 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14186 inst
.operands
[1].reg
= tmp
;
14187 NEON_ENCODE (INTEGER
, inst
);
14188 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14193 do_neon_rshl (void)
14195 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14196 struct neon_type_el et
= neon_check_type (3, rs
,
14197 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14200 tmp
= inst
.operands
[2].reg
;
14201 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14202 inst
.operands
[1].reg
= tmp
;
14203 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14207 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
14209 /* Handle .I8 pseudo-instructions. */
14212 /* Unfortunately, this will make everything apart from zero out-of-range.
14213 FIXME is this the intended semantics? There doesn't seem much point in
14214 accepting .I8 if so. */
14215 immediate
|= immediate
<< 8;
14221 if (immediate
== (immediate
& 0x000000ff))
14223 *immbits
= immediate
;
14226 else if (immediate
== (immediate
& 0x0000ff00))
14228 *immbits
= immediate
>> 8;
14231 else if (immediate
== (immediate
& 0x00ff0000))
14233 *immbits
= immediate
>> 16;
14236 else if (immediate
== (immediate
& 0xff000000))
14238 *immbits
= immediate
>> 24;
14241 if ((immediate
& 0xffff) != (immediate
>> 16))
14242 goto bad_immediate
;
14243 immediate
&= 0xffff;
14246 if (immediate
== (immediate
& 0x000000ff))
14248 *immbits
= immediate
;
14251 else if (immediate
== (immediate
& 0x0000ff00))
14253 *immbits
= immediate
>> 8;
14258 first_error (_("immediate value out of range"));
14263 do_neon_logic (void)
14265 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
14267 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14268 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14269 /* U bit and size field were set as part of the bitmask. */
14270 NEON_ENCODE (INTEGER
, inst
);
14271 neon_three_same (neon_quad (rs
), 0, -1);
14275 const int three_ops_form
= (inst
.operands
[2].present
14276 && !inst
.operands
[2].isreg
);
14277 const int immoperand
= (three_ops_form
? 2 : 1);
14278 enum neon_shape rs
= (three_ops_form
14279 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
14280 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
14281 struct neon_type_el et
= neon_check_type (2, rs
,
14282 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14283 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
14287 if (et
.type
== NT_invtype
)
14290 if (three_ops_form
)
14291 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14292 _("first and second operands shall be the same register"));
14294 NEON_ENCODE (IMMED
, inst
);
14296 immbits
= inst
.operands
[immoperand
].imm
;
14299 /* .i64 is a pseudo-op, so the immediate must be a repeating
14301 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
14302 inst
.operands
[immoperand
].reg
: 0))
14304 /* Set immbits to an invalid constant. */
14305 immbits
= 0xdeadbeef;
14312 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14316 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14320 /* Pseudo-instruction for VBIC. */
14321 neon_invert_size (&immbits
, 0, et
.size
);
14322 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14326 /* Pseudo-instruction for VORR. */
14327 neon_invert_size (&immbits
, 0, et
.size
);
14328 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14338 inst
.instruction
|= neon_quad (rs
) << 6;
14339 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14340 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14341 inst
.instruction
|= cmode
<< 8;
14342 neon_write_immbits (immbits
);
14344 neon_dp_fixup (&inst
);
14349 do_neon_bitfield (void)
14351 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14352 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14353 neon_three_same (neon_quad (rs
), 0, -1);
14357 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
14360 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14361 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
14363 if (et
.type
== NT_float
)
14365 NEON_ENCODE (FLOAT
, inst
);
14366 neon_three_same (neon_quad (rs
), 0, -1);
14370 NEON_ENCODE (INTEGER
, inst
);
14371 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
14376 do_neon_dyadic_if_su (void)
14378 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14382 do_neon_dyadic_if_su_d (void)
14384 /* This version only allow D registers, but that constraint is enforced during
14385 operand parsing so we don't need to do anything extra here. */
14386 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14390 do_neon_dyadic_if_i_d (void)
14392 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14393 affected if we specify unsigned args. */
14394 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Bit flags selecting which checks vfp_or_neon_is_neon performs.  */

enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
14404 /* Call this function if an instruction which may have belonged to the VFP or
14405 Neon instruction sets, but turned out to be a Neon instruction (due to the
14406 operand types involved, etc.). We have to check and/or fix-up a couple of
14409 - Make sure the user hasn't attempted to make a Neon instruction
14411 - Alter the value in the condition code field if necessary.
14412 - Make sure that the arch supports Neon instructions.
14414 Which of these operations take place depends on bits from enum
14415 vfp_or_neon_is_neon_bits.
14417 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14418 current instruction's condition is COND_ALWAYS, the condition field is
14419 changed to inst.uncond_value. This is necessary because instructions shared
14420 between VFP and Neon may be conditional for the VFP variants only, and the
14421 unconditional Neon version must have, e.g., 0xF in the condition field. */
14424 vfp_or_neon_is_neon (unsigned check
)
14426 /* Conditions are always legal in Thumb mode (IT blocks). */
14427 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
14429 if (inst
.cond
!= COND_ALWAYS
)
14431 first_error (_(BAD_COND
));
14434 if (inst
.uncond_value
!= -1)
14435 inst
.instruction
|= inst
.uncond_value
<< 28;
14438 if ((check
& NEON_CHECK_ARCH
)
14439 && !mark_feature_used (&fpu_neon_ext_v1
))
14441 first_error (_(BAD_FPU
));
14445 if ((check
& NEON_CHECK_ARCH8
)
14446 && !mark_feature_used (&fpu_neon_ext_armv8
))
14448 first_error (_(BAD_FPU
));
14456 do_neon_addsub_if_i (void)
14458 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
14461 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14464 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14465 affected if we specify unsigned args. */
14466 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
14469 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14471 V<op> A,B (A is operand 0, B is operand 2)
14476 so handle that case specially. */
14479 neon_exchange_operands (void)
14481 void *scratch
= alloca (sizeof (inst
.operands
[0]));
14482 if (inst
.operands
[1].present
)
14484 /* Swap operands[1] and operands[2]. */
14485 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
14486 inst
.operands
[1] = inst
.operands
[2];
14487 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
14491 inst
.operands
[1] = inst
.operands
[2];
14492 inst
.operands
[2] = inst
.operands
[0];
14497 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
14499 if (inst
.operands
[2].isreg
)
14502 neon_exchange_operands ();
14503 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
14507 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14508 struct neon_type_el et
= neon_check_type (2, rs
,
14509 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
14511 NEON_ENCODE (IMMED
, inst
);
14512 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14513 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14514 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14515 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14516 inst
.instruction
|= neon_quad (rs
) << 6;
14517 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14518 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14520 neon_dp_fixup (&inst
);
14527 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
14531 do_neon_cmp_inv (void)
14533 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
14539 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
14574 /* Encode multiply / multiply-accumulate scalar instructions. */
14577 neon_mul_mac (struct neon_type_el et
, int ubit
)
14581 /* Give a more helpful error message if we have an invalid type. */
14582 if (et
.type
== NT_invtype
)
14585 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
14586 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14587 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14588 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14589 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14590 inst
.instruction
|= LOW4 (scalar
);
14591 inst
.instruction
|= HI1 (scalar
) << 5;
14592 inst
.instruction
|= (et
.type
== NT_float
) << 8;
14593 inst
.instruction
|= neon_logbits (et
.size
) << 20;
14594 inst
.instruction
|= (ubit
!= 0) << 24;
14596 neon_dp_fixup (&inst
);
14600 do_neon_mac_maybe_scalar (void)
14602 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
14605 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14608 if (inst
.operands
[2].isscalar
)
14610 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
14611 struct neon_type_el et
= neon_check_type (3, rs
,
14612 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
14613 NEON_ENCODE (SCALAR
, inst
);
14614 neon_mul_mac (et
, neon_quad (rs
));
14618 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14619 affected if we specify unsigned args. */
14620 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14625 do_neon_fmac (void)
14627 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
14630 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14633 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14639 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14640 struct neon_type_el et
= neon_check_type (3, rs
,
14641 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14642 neon_three_same (neon_quad (rs
), 0, et
.size
);
14645 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14646 same types as the MAC equivalents. The polynomial type for this instruction
14647 is encoded the same as the integer type. */
14652 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
14655 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14658 if (inst
.operands
[2].isscalar
)
14659 do_neon_mac_maybe_scalar ();
14661 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
14665 do_neon_qdmulh (void)
14667 if (inst
.operands
[2].isscalar
)
14669 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
14670 struct neon_type_el et
= neon_check_type (3, rs
,
14671 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
14672 NEON_ENCODE (SCALAR
, inst
);
14673 neon_mul_mac (et
, neon_quad (rs
));
14677 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14678 struct neon_type_el et
= neon_check_type (3, rs
,
14679 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
14680 NEON_ENCODE (INTEGER
, inst
);
14681 /* The U bit (rounding) comes from bit mask. */
14682 neon_three_same (neon_quad (rs
), 0, et
.size
);
14687 do_neon_fcmp_absolute (void)
14689 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14690 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
14691 /* Size field comes from bit mask. */
14692 neon_three_same (neon_quad (rs
), 1, -1);
/* Encode VACLE/VACLT as the inverted absolute compare with operands
   exchanged.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
14703 do_neon_step (void)
14705 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14706 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
14707 neon_three_same (neon_quad (rs
), 0, -1);
14711 do_neon_abs_neg (void)
14713 enum neon_shape rs
;
14714 struct neon_type_el et
;
14716 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
14719 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14722 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14723 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
14725 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14726 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14727 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14728 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14729 inst
.instruction
|= neon_quad (rs
) << 6;
14730 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14731 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14733 neon_dp_fixup (&inst
);
14739 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14740 struct neon_type_el et
= neon_check_type (2, rs
,
14741 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
14742 int imm
= inst
.operands
[2].imm
;
14743 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14744 _("immediate out of range for insert"));
14745 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14751 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14752 struct neon_type_el et
= neon_check_type (2, rs
,
14753 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
14754 int imm
= inst
.operands
[2].imm
;
14755 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
14756 _("immediate out of range for insert"));
14757 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
14761 do_neon_qshlu_imm (void)
14763 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14764 struct neon_type_el et
= neon_check_type (2, rs
,
14765 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
14766 int imm
= inst
.operands
[2].imm
;
14767 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14768 _("immediate out of range for shift"));
14769 /* Only encodes the 'U present' variant of the instruction.
14770 In this case, signed types have OP (bit 8) set to 0.
14771 Unsigned types have OP set to 1. */
14772 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
14773 /* The rest of the bits are the same as other immediate shifts. */
14774 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14778 do_neon_qmovn (void)
14780 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
14781 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
14782 /* Saturating move where operands can be signed or unsigned, and the
14783 destination has the same signedness. */
14784 NEON_ENCODE (INTEGER
, inst
);
14785 if (et
.type
== NT_unsigned
)
14786 inst
.instruction
|= 0xc0;
14788 inst
.instruction
|= 0x80;
14789 neon_two_same (0, 1, et
.size
/ 2);
14793 do_neon_qmovun (void)
14795 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
14796 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
14797 /* Saturating move with unsigned results. Operands must be signed. */
14798 NEON_ENCODE (INTEGER
, inst
);
14799 neon_two_same (0, 1, et
.size
/ 2);
14803 do_neon_rshift_sat_narrow (void)
14805 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14806 or unsigned. If operands are unsigned, results must also be unsigned. */
14807 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
14808 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
14809 int imm
= inst
.operands
[2].imm
;
14810 /* This gets the bounds check, size encoding and immediate bits calculation
14814 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
14815 VQMOVN.I<size> <Dd>, <Qm>. */
14818 inst
.operands
[2].present
= 0;
14819 inst
.instruction
= N_MNEM_vqmovn
;
14824 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
14825 _("immediate out of range"));
14826 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
14830 do_neon_rshift_sat_narrow_u (void)
14832 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14833 or unsigned. If operands are unsigned, results must also be unsigned. */
14834 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
14835 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
14836 int imm
= inst
.operands
[2].imm
;
14837 /* This gets the bounds check, size encoding and immediate bits calculation
14841 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
14842 VQMOVUN.I<size> <Dd>, <Qm>. */
14845 inst
.operands
[2].present
= 0;
14846 inst
.instruction
= N_MNEM_vqmovun
;
14851 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
14852 _("immediate out of range"));
14853 /* FIXME: The manual is kind of unclear about what value U should have in
14854 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
14856 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
14860 do_neon_movn (void)
14862 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
14863 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
14864 NEON_ENCODE (INTEGER
, inst
);
14865 neon_two_same (0, 1, et
.size
/ 2);
14869 do_neon_rshift_narrow (void)
14871 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
14872 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
14873 int imm
= inst
.operands
[2].imm
;
14874 /* This gets the bounds check, size encoding and immediate bits calculation
14878 /* If immediate is zero then we are a pseudo-instruction for
14879 VMOVN.I<size> <Dd>, <Qm> */
14882 inst
.operands
[2].present
= 0;
14883 inst
.instruction
= N_MNEM_vmovn
;
14888 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
14889 _("immediate out of range for narrowing operation"));
14890 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
14894 do_neon_shll (void)
14896 /* FIXME: Type checking when lengthening. */
14897 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
14898 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
14899 unsigned imm
= inst
.operands
[2].imm
;
14901 if (imm
== et
.size
)
14903 /* Maximum shift variant. */
14904 NEON_ENCODE (INTEGER
, inst
);
14905 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14906 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14907 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14908 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14909 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14911 neon_dp_fixup (&inst
);
14915 /* A more-specific type check for non-max versions. */
14916 et
= neon_check_type (2, NS_QDI
,
14917 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
14918 NEON_ENCODE (IMMED
, inst
);
14919 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
14964 static enum neon_cvt_flavour
14965 get_neon_cvt_flavour (enum neon_shape rs
)
14967 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
14968 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
14969 if (et.type != NT_invtype) \
14971 inst.error = NULL; \
14972 return (neon_cvt_flavour_##C); \
14975 struct neon_type_el et
;
14976 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
14977 || rs
== NS_FF
) ? N_VFP
: 0;
14978 /* The instruction versions which take an immediate take one register
14979 argument, which is extended to the width of the full register. Thus the
14980 "source" and "destination" registers must have the same width. Hack that
14981 here by making the size equal to the key (wider, in this case) operand. */
14982 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
14986 return neon_cvt_flavour_invalid
;
15001 /* Neon-syntax VFP conversions. */
15004 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
15006 const char *opname
= 0;
15008 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
15010 /* Conversions with immediate bitshift. */
15011 const char *enc
[] =
15013 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15019 if (flavour
< (int) ARRAY_SIZE (enc
))
15021 opname
= enc
[flavour
];
15022 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15023 _("operands 0 and 1 must be the same register"));
15024 inst
.operands
[1] = inst
.operands
[2];
15025 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
15030 /* Conversions without bitshift. */
15031 const char *enc
[] =
15033 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15039 if (flavour
< (int) ARRAY_SIZE (enc
))
15040 opname
= enc
[flavour
];
15044 do_vfp_nsyn_opcode (opname
);
15048 do_vfp_nsyn_cvtz (void)
15050 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
15051 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15052 const char *enc
[] =
15054 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15060 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
15061 do_vfp_nsyn_opcode (enc
[flavour
]);
15065 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
15066 enum neon_cvt_mode mode
)
15071 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15072 D register operands. */
15073 if (flavour
== neon_cvt_flavour_s32_f64
15074 || flavour
== neon_cvt_flavour_u32_f64
)
15075 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15078 set_it_insn_type (OUTSIDE_IT_INSN
);
15082 case neon_cvt_flavour_s32_f64
:
15086 case neon_cvt_flavour_s32_f32
:
15090 case neon_cvt_flavour_u32_f64
:
15094 case neon_cvt_flavour_u32_f32
:
15099 first_error (_("invalid instruction shape"));
15105 case neon_cvt_mode_a
: rm
= 0; break;
15106 case neon_cvt_mode_n
: rm
= 1; break;
15107 case neon_cvt_mode_p
: rm
= 2; break;
15108 case neon_cvt_mode_m
: rm
= 3; break;
15109 default: first_error (_("invalid rounding mode")); return;
15112 NEON_ENCODE (FPV8
, inst
);
15113 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
15114 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
15115 inst
.instruction
|= sz
<< 8;
15116 inst
.instruction
|= op
<< 7;
15117 inst
.instruction
|= rm
<< 16;
15118 inst
.instruction
|= 0xf0000000;
15119 inst
.is_neon
= TRUE
;
15123 do_neon_cvt_1 (enum neon_cvt_mode mode
)
15125 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
15126 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
, NS_NULL
);
15127 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15129 /* PR11109: Handle round-to-zero for VCVT conversions. */
15130 if (mode
== neon_cvt_mode_z
15131 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
15132 && (flavour
== neon_cvt_flavour_s32_f32
15133 || flavour
== neon_cvt_flavour_u32_f32
15134 || flavour
== neon_cvt_flavour_s32_f64
15135 || flavour
== neon_cvt_flavour_u32_f64
)
15136 && (rs
== NS_FD
|| rs
== NS_FF
))
15138 do_vfp_nsyn_cvtz ();
15142 /* VFP rather than Neon conversions. */
15143 if (flavour
>= neon_cvt_flavour_first_fp
)
15145 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15146 do_vfp_nsyn_cvt (rs
, flavour
);
15148 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15159 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
15161 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15164 /* Fixed-point conversion with #0 immediate is encoded as an
15165 integer conversion. */
15166 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
15168 immbits
= 32 - inst
.operands
[2].imm
;
15169 NEON_ENCODE (IMMED
, inst
);
15170 if (flavour
!= neon_cvt_flavour_invalid
)
15171 inst
.instruction
|= enctab
[flavour
];
15172 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15173 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15174 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15175 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15176 inst
.instruction
|= neon_quad (rs
) << 6;
15177 inst
.instruction
|= 1 << 21;
15178 inst
.instruction
|= immbits
<< 16;
15180 neon_dp_fixup (&inst
);
15186 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
15188 NEON_ENCODE (FLOAT
, inst
);
15189 set_it_insn_type (OUTSIDE_IT_INSN
);
15191 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
15194 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15195 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15196 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15197 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15198 inst
.instruction
|= neon_quad (rs
) << 6;
15199 inst
.instruction
|= (flavour
== neon_cvt_flavour_u32_f32
) << 7;
15200 inst
.instruction
|= mode
<< 8;
15202 inst
.instruction
|= 0xfc000000;
15204 inst
.instruction
|= 0xf0000000;
15210 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
15212 NEON_ENCODE (INTEGER
, inst
);
15214 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15217 if (flavour
!= neon_cvt_flavour_invalid
)
15218 inst
.instruction
|= enctab
[flavour
];
15220 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15221 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15222 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15223 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15224 inst
.instruction
|= neon_quad (rs
) << 6;
15225 inst
.instruction
|= 2 << 18;
15227 neon_dp_fixup (&inst
);
15232 /* Half-precision conversions for Advanced SIMD -- neon. */
15237 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
15239 as_bad (_("operand size must match register width"));
15244 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
15246 as_bad (_("operand size must match register width"));
15251 inst
.instruction
= 0x3b60600;
15253 inst
.instruction
= 0x3b60700;
15255 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15256 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15257 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15258 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15259 neon_dp_fixup (&inst
);
15263 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15264 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15265 do_vfp_nsyn_cvt (rs
, flavour
);
15267 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15272 do_neon_cvtr (void)
15274 do_neon_cvt_1 (neon_cvt_mode_x
);
15280 do_neon_cvt_1 (neon_cvt_mode_z
);
15284 do_neon_cvta (void)
15286 do_neon_cvt_1 (neon_cvt_mode_a
);
15290 do_neon_cvtn (void)
15292 do_neon_cvt_1 (neon_cvt_mode_n
);
15296 do_neon_cvtp (void)
15298 do_neon_cvt_1 (neon_cvt_mode_p
);
15302 do_neon_cvtm (void)
15304 do_neon_cvt_1 (neon_cvt_mode_m
);
15308 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
15311 mark_feature_used (&fpu_vfp_ext_armv8
);
15313 encode_arm_vfp_reg (inst
.operands
[0].reg
,
15314 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
15315 encode_arm_vfp_reg (inst
.operands
[1].reg
,
15316 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
15317 inst
.instruction
|= to
? 0x10000 : 0;
15318 inst
.instruction
|= t
? 0x80 : 0;
15319 inst
.instruction
|= is_double
? 0x100 : 0;
15320 do_vfp_cond_or_thumb ();
15324 do_neon_cvttb_1 (bfd_boolean t
)
15326 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_DF
, NS_NULL
);
15330 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
15333 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
15335 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
15338 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
15340 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
15342 /* The VCVTB and VCVTT instructions with D-register operands
15343 don't work for SP only targets. */
15344 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15348 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
15350 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
15352 /* The VCVTB and VCVTT instructions with D-register operands
15353 don't work for SP only targets. */
15354 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15358 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
15365 do_neon_cvtb (void)
15367 do_neon_cvttb_1 (FALSE
);
15372 do_neon_cvtt (void)
15374 do_neon_cvttb_1 (TRUE
);
15378 neon_move_immediate (void)
15380 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
15381 struct neon_type_el et
= neon_check_type (2, rs
,
15382 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15383 unsigned immlo
, immhi
= 0, immbits
;
15384 int op
, cmode
, float_p
;
15386 constraint (et
.type
== NT_invtype
,
15387 _("operand size must be specified for immediate VMOV"));
15389 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
15390 op
= (inst
.instruction
& (1 << 5)) != 0;
15392 immlo
= inst
.operands
[1].imm
;
15393 if (inst
.operands
[1].regisimm
)
15394 immhi
= inst
.operands
[1].reg
;
15396 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
15397 _("immediate has bits set outside the operand size"));
15399 float_p
= inst
.operands
[1].immisfloat
;
15401 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
15402 et
.size
, et
.type
)) == FAIL
)
15404 /* Invert relevant bits only. */
15405 neon_invert_size (&immlo
, &immhi
, et
.size
);
15406 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15407 with one or the other; those cases are caught by
15408 neon_cmode_for_move_imm. */
15410 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
15411 &op
, et
.size
, et
.type
)) == FAIL
)
15413 first_error (_("immediate out of range"));
15418 inst
.instruction
&= ~(1 << 5);
15419 inst
.instruction
|= op
<< 5;
15421 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15422 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15423 inst
.instruction
|= neon_quad (rs
) << 6;
15424 inst
.instruction
|= cmode
<< 8;
15426 neon_write_immbits (immbits
);
15432 if (inst
.operands
[1].isreg
)
15434 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15436 NEON_ENCODE (INTEGER
, inst
);
15437 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15438 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15439 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15440 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15441 inst
.instruction
|= neon_quad (rs
) << 6;
15445 NEON_ENCODE (IMMED
, inst
);
15446 neon_move_immediate ();
15449 neon_dp_fixup (&inst
);
15452 /* Encode instructions of form:
15454 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15455 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15458 neon_mixed_length (struct neon_type_el et
, unsigned size
)
15460 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15461 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15462 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15463 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15464 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15465 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15466 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
15467 inst
.instruction
|= neon_logbits (size
) << 20;
15469 neon_dp_fixup (&inst
);
15473 do_neon_dyadic_long (void)
15475 /* FIXME: Type checking for lengthening op. */
15476 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15477 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15478 neon_mixed_length (et
, et
.size
);
15482 do_neon_abal (void)
15484 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15485 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15486 neon_mixed_length (et
, et
.size
);
15490 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
15492 if (inst
.operands
[2].isscalar
)
15494 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
15495 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
15496 NEON_ENCODE (SCALAR
, inst
);
15497 neon_mul_mac (et
, et
.type
== NT_unsigned
);
15501 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15502 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
15503 NEON_ENCODE (INTEGER
, inst
);
15504 neon_mixed_length (et
, et
.size
);
15509 do_neon_mac_maybe_scalar_long (void)
15511 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
15515 do_neon_dyadic_wide (void)
15517 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
15518 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15519 neon_mixed_length (et
, et
.size
);
15523 do_neon_dyadic_narrow (void)
15525 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15526 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
15527 /* Operand sign is unimportant, and the U bit is part of the opcode,
15528 so force the operand type to integer. */
15529 et
.type
= NT_integer
;
15530 neon_mixed_length (et
, et
.size
/ 2);
15534 do_neon_mul_sat_scalar_long (void)
15536 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
15540 do_neon_vmull (void)
15542 if (inst
.operands
[2].isscalar
)
15543 do_neon_mac_maybe_scalar_long ();
15546 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15547 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
15549 if (et
.type
== NT_poly
)
15550 NEON_ENCODE (POLY
, inst
);
15552 NEON_ENCODE (INTEGER
, inst
);
15554 /* For polynomial encoding the U bit must be zero, and the size must
15555 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
15556 obviously, as 0b10). */
15559 /* Check we're on the correct architecture. */
15560 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
15562 _("Instruction form not available on this architecture.");
15567 neon_mixed_length (et
, et
.size
);
15574 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
15575 struct neon_type_el et
= neon_check_type (3, rs
,
15576 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15577 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
15579 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
15580 _("shift out of range"));
15581 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15582 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15583 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15584 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15585 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15586 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15587 inst
.instruction
|= neon_quad (rs
) << 6;
15588 inst
.instruction
|= imm
<< 8;
15590 neon_dp_fixup (&inst
);
15596 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15597 struct neon_type_el et
= neon_check_type (2, rs
,
15598 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15599 unsigned op
= (inst
.instruction
>> 7) & 3;
15600 /* N (width of reversed regions) is encoded as part of the bitmask. We
15601 extract it here to check the elements to be reversed are smaller.
15602 Otherwise we'd get a reserved instruction. */
15603 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
15604 gas_assert (elsize
!= 0);
15605 constraint (et
.size
>= elsize
,
15606 _("elements must be smaller than reversal region"));
15607 neon_two_same (neon_quad (rs
), 1, et
.size
);
15613 if (inst
.operands
[1].isscalar
)
15615 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
15616 struct neon_type_el et
= neon_check_type (2, rs
,
15617 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15618 unsigned sizebits
= et
.size
>> 3;
15619 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
15620 int logsize
= neon_logbits (et
.size
);
15621 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
15623 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
15626 NEON_ENCODE (SCALAR
, inst
);
15627 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15628 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15629 inst
.instruction
|= LOW4 (dm
);
15630 inst
.instruction
|= HI1 (dm
) << 5;
15631 inst
.instruction
|= neon_quad (rs
) << 6;
15632 inst
.instruction
|= x
<< 17;
15633 inst
.instruction
|= sizebits
<< 16;
15635 neon_dp_fixup (&inst
);
15639 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
15640 struct neon_type_el et
= neon_check_type (2, rs
,
15641 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
15642 /* Duplicate ARM register to lanes of vector. */
15643 NEON_ENCODE (ARMREG
, inst
);
15646 case 8: inst
.instruction
|= 0x400000; break;
15647 case 16: inst
.instruction
|= 0x000020; break;
15648 case 32: inst
.instruction
|= 0x000000; break;
15651 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
15652 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
15653 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
15654 inst
.instruction
|= neon_quad (rs
) << 21;
15655 /* The encoding for this instruction is identical for the ARM and Thumb
15656 variants, except for the condition field. */
15657 do_vfp_cond_or_thumb ();
15661 /* VMOV has particularly many variations. It can be one of:
15662 0. VMOV<c><q> <Qd>, <Qm>
15663 1. VMOV<c><q> <Dd>, <Dm>
15664 (Register operations, which are VORR with Rm = Rn.)
15665 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15666 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15668 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15669 (ARM register to scalar.)
15670 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15671 (Two ARM registers to vector.)
15672 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15673 (Scalar to ARM register.)
15674 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15675 (Vector to two ARM registers.)
15676 8. VMOV.F32 <Sd>, <Sm>
15677 9. VMOV.F64 <Dd>, <Dm>
15678 (VFP register moves.)
15679 10. VMOV.F32 <Sd>, #imm
15680 11. VMOV.F64 <Dd>, #imm
15681 (VFP float immediate load.)
15682 12. VMOV <Rd>, <Sm>
15683 (VFP single to ARM reg.)
15684 13. VMOV <Sd>, <Rm>
15685 (ARM reg to VFP single.)
15686 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15687 (Two ARM regs to two VFP singles.)
15688 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15689 (Two VFP singles to two ARM regs.)
15691 These cases can be disambiguated using neon_select_shape, except cases 1/9
15692 and 3/11 which depend on the operand type too.
15694 All the encoded bits are hardcoded by this function.
15696 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15697 Cases 5, 7 may be used with VFPv2 and above.
15699 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15700 can specify a type where it doesn't make sense to, and is ignored). */
15705 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
15706 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
15708 struct neon_type_el et
;
15709 const char *ldconst
= 0;
15713 case NS_DD
: /* case 1/9. */
15714 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
15715 /* It is not an error here if no type is given. */
15717 if (et
.type
== NT_float
&& et
.size
== 64)
15719 do_vfp_nsyn_opcode ("fcpyd");
15722 /* fall through. */
15724 case NS_QQ
: /* case 0/1. */
15726 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15728 /* The architecture manual I have doesn't explicitly state which
15729 value the U bit should have for register->register moves, but
15730 the equivalent VORR instruction has U = 0, so do that. */
15731 inst
.instruction
= 0x0200110;
15732 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15733 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15734 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15735 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15736 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15737 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15738 inst
.instruction
|= neon_quad (rs
) << 6;
15740 neon_dp_fixup (&inst
);
15744 case NS_DI
: /* case 3/11. */
15745 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
15747 if (et
.type
== NT_float
&& et
.size
== 64)
15749 /* case 11 (fconstd). */
15750 ldconst
= "fconstd";
15751 goto encode_fconstd
;
15753 /* fall through. */
15755 case NS_QI
: /* case 2/3. */
15756 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15758 inst
.instruction
= 0x0800010;
15759 neon_move_immediate ();
15760 neon_dp_fixup (&inst
);
15763 case NS_SR
: /* case 4. */
15765 unsigned bcdebits
= 0;
15767 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
15768 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
15770 /* .<size> is optional here, defaulting to .32. */
15771 if (inst
.vectype
.elems
== 0
15772 && inst
.operands
[0].vectype
.type
== NT_invtype
15773 && inst
.operands
[1].vectype
.type
== NT_invtype
)
15775 inst
.vectype
.el
[0].type
= NT_untyped
;
15776 inst
.vectype
.el
[0].size
= 32;
15777 inst
.vectype
.elems
= 1;
15780 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
15781 logsize
= neon_logbits (et
.size
);
15783 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
15785 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
15786 && et
.size
!= 32, _(BAD_FPU
));
15787 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
15788 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
15792 case 8: bcdebits
= 0x8; break;
15793 case 16: bcdebits
= 0x1; break;
15794 case 32: bcdebits
= 0x0; break;
15798 bcdebits
|= x
<< logsize
;
15800 inst
.instruction
= 0xe000b10;
15801 do_vfp_cond_or_thumb ();
15802 inst
.instruction
|= LOW4 (dn
) << 16;
15803 inst
.instruction
|= HI1 (dn
) << 7;
15804 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
15805 inst
.instruction
|= (bcdebits
& 3) << 5;
15806 inst
.instruction
|= (bcdebits
>> 2) << 21;
15810 case NS_DRR
: /* case 5 (fmdrr). */
15811 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
15814 inst
.instruction
= 0xc400b10;
15815 do_vfp_cond_or_thumb ();
15816 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
15817 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
15818 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
15819 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
15822 case NS_RS
: /* case 6. */
15825 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
15826 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
15827 unsigned abcdebits
= 0;
15829 /* .<dt> is optional here, defaulting to .32. */
15830 if (inst
.vectype
.elems
== 0
15831 && inst
.operands
[0].vectype
.type
== NT_invtype
15832 && inst
.operands
[1].vectype
.type
== NT_invtype
)
15834 inst
.vectype
.el
[0].type
= NT_untyped
;
15835 inst
.vectype
.el
[0].size
= 32;
15836 inst
.vectype
.elems
= 1;
15839 et
= neon_check_type (2, NS_NULL
,
15840 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
15841 logsize
= neon_logbits (et
.size
);
15843 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
15845 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
15846 && et
.size
!= 32, _(BAD_FPU
));
15847 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
15848 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
15852 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
15853 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
15854 case 32: abcdebits
= 0x00; break;
15858 abcdebits
|= x
<< logsize
;
15859 inst
.instruction
= 0xe100b10;
15860 do_vfp_cond_or_thumb ();
15861 inst
.instruction
|= LOW4 (dn
) << 16;
15862 inst
.instruction
|= HI1 (dn
) << 7;
15863 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
15864 inst
.instruction
|= (abcdebits
& 3) << 5;
15865 inst
.instruction
|= (abcdebits
>> 2) << 21;
15869 case NS_RRD
: /* case 7 (fmrrd). */
15870 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
15873 inst
.instruction
= 0xc500b10;
15874 do_vfp_cond_or_thumb ();
15875 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
15876 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
15877 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15878 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15881 case NS_FF
: /* case 8 (fcpys). */
15882 do_vfp_nsyn_opcode ("fcpys");
15885 case NS_FI
: /* case 10 (fconsts). */
15886 ldconst
= "fconsts";
15888 if (is_quarter_float (inst
.operands
[1].imm
))
15890 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
15891 do_vfp_nsyn_opcode (ldconst
);
15894 first_error (_("immediate out of range"));
15897 case NS_RF
: /* case 12 (fmrs). */
15898 do_vfp_nsyn_opcode ("fmrs");
15901 case NS_FR
: /* case 13 (fmsr). */
15902 do_vfp_nsyn_opcode ("fmsr");
15905 /* The encoders for the fmrrs and fmsrr instructions expect three operands
15906 (one of which is a list), but we have parsed four. Do some fiddling to
15907 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
15909 case NS_RRFF
: /* case 14 (fmrrs). */
15910 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
15911 _("VFP registers must be adjacent"));
15912 inst
.operands
[2].imm
= 2;
15913 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
15914 do_vfp_nsyn_opcode ("fmrrs");
15917 case NS_FFRR
: /* case 15 (fmsrr). */
15918 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
15919 _("VFP registers must be adjacent"));
15920 inst
.operands
[1] = inst
.operands
[2];
15921 inst
.operands
[2] = inst
.operands
[3];
15922 inst
.operands
[0].imm
= 2;
15923 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
15924 do_vfp_nsyn_opcode ("fmsrr");
15928 /* neon_select_shape has determined that the instruction
15929 shape is wrong and has already set the error message. */
15938 do_neon_rshift_round_imm (void)
15940 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15941 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
15942 int imm
= inst
.operands
[2].imm
;
15944 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
15947 inst
.operands
[2].present
= 0;
15952 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15953 _("immediate out of range for shift"));
15954 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
15959 do_neon_movl (void)
15961 struct neon_type_el et
= neon_check_type (2, NS_QD
,
15962 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15963 unsigned sizebits
= et
.size
>> 3;
15964 inst
.instruction
|= sizebits
<< 19;
15965 neon_two_same (0, et
.type
== NT_unsigned
, -1);
15971 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15972 struct neon_type_el et
= neon_check_type (2, rs
,
15973 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15974 NEON_ENCODE (INTEGER
, inst
);
15975 neon_two_same (neon_quad (rs
), 1, et
.size
);
15979 do_neon_zip_uzp (void)
15981 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15982 struct neon_type_el et
= neon_check_type (2, rs
,
15983 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15984 if (rs
== NS_DD
&& et
.size
== 32)
15986 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
15987 inst
.instruction
= N_MNEM_vtrn
;
15991 neon_two_same (neon_quad (rs
), 1, et
.size
);
15995 do_neon_sat_abs_neg (void)
15997 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15998 struct neon_type_el et
= neon_check_type (2, rs
,
15999 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16000 neon_two_same (neon_quad (rs
), 1, et
.size
);
16004 do_neon_pair_long (void)
16006 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16007 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
16008 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16009 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
16010 neon_two_same (neon_quad (rs
), 1, et
.size
);
16014 do_neon_recip_est (void)
16016 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16017 struct neon_type_el et
= neon_check_type (2, rs
,
16018 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
16019 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16020 neon_two_same (neon_quad (rs
), 1, et
.size
);
16026 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16027 struct neon_type_el et
= neon_check_type (2, rs
,
16028 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16029 neon_two_same (neon_quad (rs
), 1, et
.size
);
16035 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16036 struct neon_type_el et
= neon_check_type (2, rs
,
16037 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
16038 neon_two_same (neon_quad (rs
), 1, et
.size
);
16044 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16045 struct neon_type_el et
= neon_check_type (2, rs
,
16046 N_EQK
| N_INT
, N_8
| N_KEY
);
16047 neon_two_same (neon_quad (rs
), 1, et
.size
);
16053 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16054 neon_two_same (neon_quad (rs
), 1, -1);
16058 do_neon_tbl_tbx (void)
16060 unsigned listlenbits
;
16061 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
16063 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
16065 first_error (_("bad list length for table lookup"));
16069 listlenbits
= inst
.operands
[1].imm
- 1;
16070 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16071 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16072 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16073 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16074 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16075 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16076 inst
.instruction
|= listlenbits
<< 8;
16078 neon_dp_fixup (&inst
);
16082 do_neon_ldm_stm (void)
16084 /* P, U and L bits are part of bitmask. */
16085 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
16086 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
16088 if (inst
.operands
[1].issingle
)
16090 do_vfp_nsyn_ldm_stm (is_dbmode
);
16094 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
16095 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16097 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
16098 _("register list must contain at least 1 and at most 16 "
16101 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
16102 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
16103 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16104 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
16106 inst
.instruction
|= offsetbits
;
16108 do_vfp_cond_or_thumb ();
16112 do_neon_ldr_str (void)
16114 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
16116 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16117 And is UNPREDICTABLE in thumb mode. */
16119 && inst
.operands
[1].reg
== REG_PC
16120 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
16123 inst
.error
= _("Use of PC here is UNPREDICTABLE");
16124 else if (warn_on_deprecated
)
16125 as_tsktsk (_("Use of PC here is deprecated"));
16128 if (inst
.operands
[0].issingle
)
16131 do_vfp_nsyn_opcode ("flds");
16133 do_vfp_nsyn_opcode ("fsts");
16138 do_vfp_nsyn_opcode ("fldd");
16140 do_vfp_nsyn_opcode ("fstd");
16144 /* "interleave" version also handles non-interleaving register VLD1/VST1
16148 do_neon_ld_st_interleave (void)
16150 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
16151 N_8
| N_16
| N_32
| N_64
);
16152 unsigned alignbits
= 0;
16154 /* The bits in this table go:
16155 0: register stride of one (0) or two (1)
16156 1,2: register list length, minus one (1, 2, 3, 4).
16157 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16158 We use -1 for invalid entries. */
16159 const int typetable
[] =
16161 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16162 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16163 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16164 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16168 if (et
.type
== NT_invtype
)
16171 if (inst
.operands
[1].immisalign
)
16172 switch (inst
.operands
[1].imm
>> 8)
16174 case 64: alignbits
= 1; break;
16176 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
16177 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16178 goto bad_alignment
;
16182 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16183 goto bad_alignment
;
16188 first_error (_("bad alignment"));
16192 inst
.instruction
|= alignbits
<< 4;
16193 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16195 /* Bits [4:6] of the immediate in a list specifier encode register stride
16196 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16197 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16198 up the right value for "type" in a table based on this value and the given
16199 list style, then stick it back. */
16200 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
16201 | (((inst
.instruction
>> 8) & 3) << 3);
16203 typebits
= typetable
[idx
];
16205 constraint (typebits
== -1, _("bad list type for instruction"));
16206 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
16207 _("bad element type for instruction"));
16209 inst
.instruction
&= ~0xf00;
16210 inst
.instruction
|= typebits
<< 8;
16213 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16214 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16215 otherwise. The variable arguments are a list of pairs of legal (size, align)
16216 values, terminated with -1. */
16219 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
16222 int result
= FAIL
, thissize
, thisalign
;
16224 if (!inst
.operands
[1].immisalign
)
16230 va_start (ap
, do_align
);
16234 thissize
= va_arg (ap
, int);
16235 if (thissize
== -1)
16237 thisalign
= va_arg (ap
, int);
16239 if (size
== thissize
&& align
== thisalign
)
16242 while (result
!= SUCCESS
);
16246 if (result
== SUCCESS
)
16249 first_error (_("unsupported alignment for instruction"));
16255 do_neon_ld_st_lane (void)
16257 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16258 int align_good
, do_align
= 0;
16259 int logsize
= neon_logbits (et
.size
);
16260 int align
= inst
.operands
[1].imm
>> 8;
16261 int n
= (inst
.instruction
>> 8) & 3;
16262 int max_el
= 64 / et
.size
;
16264 if (et
.type
== NT_invtype
)
16267 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
16268 _("bad list length"));
16269 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
16270 _("scalar index out of range"));
16271 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
16273 _("stride of 2 unavailable when element size is 8"));
16277 case 0: /* VLD1 / VST1. */
16278 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
16280 if (align_good
== FAIL
)
16284 unsigned alignbits
= 0;
16287 case 16: alignbits
= 0x1; break;
16288 case 32: alignbits
= 0x3; break;
16291 inst
.instruction
|= alignbits
<< 4;
16295 case 1: /* VLD2 / VST2. */
16296 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
16298 if (align_good
== FAIL
)
16301 inst
.instruction
|= 1 << 4;
16304 case 2: /* VLD3 / VST3. */
16305 constraint (inst
.operands
[1].immisalign
,
16306 _("can't use alignment with this instruction"));
16309 case 3: /* VLD4 / VST4. */
16310 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
16311 16, 64, 32, 64, 32, 128, -1);
16312 if (align_good
== FAIL
)
16316 unsigned alignbits
= 0;
16319 case 8: alignbits
= 0x1; break;
16320 case 16: alignbits
= 0x1; break;
16321 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
16324 inst
.instruction
|= alignbits
<< 4;
16331 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16332 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16333 inst
.instruction
|= 1 << (4 + logsize
);
16335 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
16336 inst
.instruction
|= logsize
<< 10;
16339 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16342 do_neon_ld_dup (void)
16344 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16345 int align_good
, do_align
= 0;
16347 if (et
.type
== NT_invtype
)
16350 switch ((inst
.instruction
>> 8) & 3)
16352 case 0: /* VLD1. */
16353 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
16354 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16355 &do_align
, 16, 16, 32, 32, -1);
16356 if (align_good
== FAIL
)
16358 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
16361 case 2: inst
.instruction
|= 1 << 5; break;
16362 default: first_error (_("bad list length")); return;
16364 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16367 case 1: /* VLD2. */
16368 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16369 &do_align
, 8, 16, 16, 32, 32, 64, -1);
16370 if (align_good
== FAIL
)
16372 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
16373 _("bad list length"));
16374 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16375 inst
.instruction
|= 1 << 5;
16376 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16379 case 2: /* VLD3. */
16380 constraint (inst
.operands
[1].immisalign
,
16381 _("can't use alignment with this instruction"));
16382 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
16383 _("bad list length"));
16384 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16385 inst
.instruction
|= 1 << 5;
16386 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16389 case 3: /* VLD4. */
16391 int align
= inst
.operands
[1].imm
>> 8;
16392 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
16393 16, 64, 32, 64, 32, 128, -1);
16394 if (align_good
== FAIL
)
16396 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
16397 _("bad list length"));
16398 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16399 inst
.instruction
|= 1 << 5;
16400 if (et
.size
== 32 && align
== 128)
16401 inst
.instruction
|= 0x3 << 6;
16403 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16410 inst
.instruction
|= do_align
<< 4;
16413 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16414 apart from bits [11:4]. */
16417 do_neon_ldx_stx (void)
16419 if (inst
.operands
[1].isreg
)
16420 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
16422 switch (NEON_LANE (inst
.operands
[0].imm
))
16424 case NEON_INTERLEAVE_LANES
:
16425 NEON_ENCODE (INTERLV
, inst
);
16426 do_neon_ld_st_interleave ();
16429 case NEON_ALL_LANES
:
16430 NEON_ENCODE (DUP
, inst
);
16431 if (inst
.instruction
== N_INV
)
16433 first_error ("only loads support such operands");
16440 NEON_ENCODE (LANE
, inst
);
16441 do_neon_ld_st_lane ();
16444 /* L bit comes from bit mask. */
16445 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16446 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16447 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16449 if (inst
.operands
[1].postind
)
16451 int postreg
= inst
.operands
[1].imm
& 0xf;
16452 constraint (!inst
.operands
[1].immisreg
,
16453 _("post-index must be a register"));
16454 constraint (postreg
== 0xd || postreg
== 0xf,
16455 _("bad register for post-index"));
16456 inst
.instruction
|= postreg
;
16460 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
16461 constraint (inst
.reloc
.exp
.X_op
!= O_constant
16462 || inst
.reloc
.exp
.X_add_number
!= 0,
16465 if (inst
.operands
[1].writeback
)
16467 inst
.instruction
|= 0xd;
16470 inst
.instruction
|= 0xf;
16474 inst
.instruction
|= 0xf9000000;
16476 inst
.instruction
|= 0xf4000000;
16481 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
16483 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16484 D register operands. */
16485 if (neon_shape_class
[rs
] == SC_DOUBLE
)
16486 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16489 NEON_ENCODE (FPV8
, inst
);
16492 do_vfp_sp_dyadic ();
16494 do_vfp_dp_rd_rn_rm ();
16497 inst
.instruction
|= 0x100;
16499 inst
.instruction
|= 0xf0000000;
16505 set_it_insn_type (OUTSIDE_IT_INSN
);
16507 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
16508 first_error (_("invalid instruction shape"));
16514 set_it_insn_type (OUTSIDE_IT_INSN
);
16516 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
16519 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16522 neon_dyadic_misc (NT_untyped
, N_F32
, 0);
16526 do_vrint_1 (enum neon_cvt_mode mode
)
16528 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
16529 struct neon_type_el et
;
16534 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16535 D register operands. */
16536 if (neon_shape_class
[rs
] == SC_DOUBLE
)
16537 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16540 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
16541 if (et
.type
!= NT_invtype
)
16543 /* VFP encodings. */
16544 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
16545 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
16546 set_it_insn_type (OUTSIDE_IT_INSN
);
16548 NEON_ENCODE (FPV8
, inst
);
16550 do_vfp_sp_monadic ();
16552 do_vfp_dp_rd_rm ();
16556 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
16557 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
16558 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
16559 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
16560 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
16561 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
16562 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
16566 inst
.instruction
|= (rs
== NS_DD
) << 8;
16567 do_vfp_cond_or_thumb ();
16571 /* Neon encodings (or something broken...). */
16573 et
= neon_check_type (2, rs
, N_EQK
, N_F32
| N_KEY
);
16575 if (et
.type
== NT_invtype
)
16578 set_it_insn_type (OUTSIDE_IT_INSN
);
16579 NEON_ENCODE (FLOAT
, inst
);
16581 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16584 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16585 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16586 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16587 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16588 inst
.instruction
|= neon_quad (rs
) << 6;
16591 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
16592 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
16593 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
16594 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
16595 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
16596 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
16597 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
16602 inst
.instruction
|= 0xfc000000;
16604 inst
.instruction
|= 0xf0000000;
16611 do_vrint_1 (neon_cvt_mode_x
);
16617 do_vrint_1 (neon_cvt_mode_z
);
16623 do_vrint_1 (neon_cvt_mode_r
);
16629 do_vrint_1 (neon_cvt_mode_a
);
16635 do_vrint_1 (neon_cvt_mode_n
);
16641 do_vrint_1 (neon_cvt_mode_p
);
16647 do_vrint_1 (neon_cvt_mode_m
);
16650 /* Crypto v1 instructions. */
16652 do_crypto_2op_1 (unsigned elttype
, int op
)
16654 set_it_insn_type (OUTSIDE_IT_INSN
);
16656 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
16662 NEON_ENCODE (INTEGER
, inst
);
16663 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16664 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16665 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16666 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16668 inst
.instruction
|= op
<< 6;
16671 inst
.instruction
|= 0xfc000000;
16673 inst
.instruction
|= 0xf0000000;
16677 do_crypto_3op_1 (int u
, int op
)
16679 set_it_insn_type (OUTSIDE_IT_INSN
);
16681 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
16682 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
16687 NEON_ENCODE (INTEGER
, inst
);
16688 neon_three_same (1, u
, 8 << op
);
16694 do_crypto_2op_1 (N_8
, 0);
16700 do_crypto_2op_1 (N_8
, 1);
16706 do_crypto_2op_1 (N_8
, 2);
16712 do_crypto_2op_1 (N_8
, 3);
16718 do_crypto_3op_1 (0, 0);
16724 do_crypto_3op_1 (0, 1);
16730 do_crypto_3op_1 (0, 2);
16736 do_crypto_3op_1 (0, 3);
16742 do_crypto_3op_1 (1, 0);
16748 do_crypto_3op_1 (1, 1);
/* Encode SHA256SU1.  */

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
16760 do_crypto_2op_1 (N_32
, -1);
16766 do_crypto_2op_1 (N_32
, 0);
16770 do_sha256su0 (void)
16772 do_crypto_2op_1 (N_32
, 1);
16776 do_crc32_1 (unsigned int poly
, unsigned int sz
)
16778 unsigned int Rd
= inst
.operands
[0].reg
;
16779 unsigned int Rn
= inst
.operands
[1].reg
;
16780 unsigned int Rm
= inst
.operands
[2].reg
;
16782 set_it_insn_type (OUTSIDE_IT_INSN
);
16783 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
16784 inst
.instruction
|= LOW4 (Rn
) << 16;
16785 inst
.instruction
|= LOW4 (Rm
);
16786 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
16787 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
16789 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
16790 as_warn (UNPRED_REG ("r15"));
16791 if (thumb_mode
&& (Rd
== REG_SP
|| Rn
== REG_SP
|| Rm
== REG_SP
))
16792 as_warn (UNPRED_REG ("r13"));
16832 /* Overall per-instruction processing. */
16834 /* We need to be able to fix up arbitrary expressions in some statements.
16835 This is so that we can handle symbols that are an arbitrary distance from
16836 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
16837 which returns part of an address in a form which will be valid for
16838 a data instruction. We do this by pushing the expression into a symbol
16839 in the expr_section, and creating a fix for that. */
16842 fix_new_arm (fragS
* frag
,
16856 /* Create an absolute valued symbol, so we have something to
16857 refer to in the object file. Unfortunately for us, gas's
16858 generic expression parsing will already have folded out
16859 any use of .set foo/.type foo %function that may have
16860 been used to set type information of the target location,
16861 that's being specified symbolically. We have to presume
16862 the user knows what they are doing. */
16866 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
16868 symbol
= symbol_find_or_make (name
);
16869 S_SET_SEGMENT (symbol
, absolute_section
);
16870 symbol_set_frag (symbol
, &zero_address_frag
);
16871 S_SET_VALUE (symbol
, exp
->X_add_number
);
16872 exp
->X_op
= O_symbol
;
16873 exp
->X_add_symbol
= symbol
;
16874 exp
->X_add_number
= 0;
16880 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
16881 (enum bfd_reloc_code_real
) reloc
);
16885 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
16886 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
16890 /* Mark whether the fix is to a THUMB instruction, or an ARM
16892 new_fix
->tc_fix_data
= thumb_mode
;
16895 /* Create a frg for an instruction requiring relaxation. */
16897 output_relax_insn (void)
16903 /* The size of the instruction is unknown, so tie the debug info to the
16904 start of the instruction. */
16905 dwarf2_emit_insn (0);
16907 switch (inst
.reloc
.exp
.X_op
)
16910 sym
= inst
.reloc
.exp
.X_add_symbol
;
16911 offset
= inst
.reloc
.exp
.X_add_number
;
16915 offset
= inst
.reloc
.exp
.X_add_number
;
16918 sym
= make_expr_symbol (&inst
.reloc
.exp
);
16922 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
16923 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
16924 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
16927 /* Write a 32-bit thumb instruction to buf. */
16929 put_thumb32_insn (char * buf
, unsigned long insn
)
16931 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
16932 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
16936 output_inst (const char * str
)
16942 as_bad ("%s -- `%s'", inst
.error
, str
);
16947 output_relax_insn ();
16950 if (inst
.size
== 0)
16953 to
= frag_more (inst
.size
);
16954 /* PR 9814: Record the thumb mode into the current frag so that we know
16955 what type of NOP padding to use, if necessary. We override any previous
16956 setting so that if the mode has changed then the NOPS that we use will
16957 match the encoding of the last instruction in the frag. */
16958 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
16960 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
16962 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
16963 put_thumb32_insn (to
, inst
.instruction
);
16965 else if (inst
.size
> INSN_SIZE
)
16967 gas_assert (inst
.size
== (2 * INSN_SIZE
));
16968 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
16969 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
16972 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
16974 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
16975 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
16976 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
16979 dwarf2_emit_insn (inst
.size
);
16983 output_it_inst (int cond
, int mask
, char * to
)
16985 unsigned long instruction
= 0xbf00;
16988 instruction
|= mask
;
16989 instruction
|= cond
<< 4;
16993 to
= frag_more (2);
16995 dwarf2_emit_insn (2);
16999 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17038 /* Subroutine of md_assemble, responsible for looking up the primary
17039 opcode from the mnemonic the user wrote. STR points to the
17040 beginning of the mnemonic.
17042 This is not simply a hash table lookup, because of conditional
17043 variants. Most instructions have conditional variants, which are
17044 expressed with a _conditional affix_ to the mnemonic. If we were
17045 to encode each conditional variant as a literal string in the opcode
17046 table, it would have approximately 20,000 entries.
17048 Most mnemonics take this affix as a suffix, and in unified syntax,
17049 'most' is upgraded to 'all'. However, in the divided syntax, some
17050 instructions take the affix as an infix, notably the s-variants of
17051 the arithmetic instructions. Of those instructions, all but six
17052 have the infix appear after the third character of the mnemonic.
17054 Accordingly, the algorithm for looking up primary opcodes given
17057 1. Look up the identifier in the opcode table.
17058 If we find a match, go to step U.
17060 2. Look up the last two characters of the identifier in the
17061 conditions table. If we find a match, look up the first N-2
17062 characters of the identifier in the opcode table. If we
17063 find a match, go to step CE.
17065 3. Look up the fourth and fifth characters of the identifier in
17066 the conditions table. If we find a match, extract those
17067 characters from the identifier, and look up the remaining
17068 characters in the opcode table. If we find a match, go
17073 U. Examine the tag field of the opcode structure, in case this is
17074 one of the six instructions with its conditional infix in an
17075 unusual place. If it is, the tag tells us where to find the
17076 infix; look it up in the conditions table and set inst.cond
17077 accordingly. Otherwise, this is an unconditional instruction.
17078 Again set inst.cond accordingly. Return the opcode structure.
17080 CE. Examine the tag field to make sure this is an instruction that
17081 should receive a conditional suffix. If it is not, fail.
17082 Otherwise, set inst.cond from the suffix we already looked up,
17083 and return the opcode structure.
17085 CM. Examine the tag field to make sure this is an instruction that
17086 should receive a conditional infix after the third character.
17087 If it is not, fail. Otherwise, undo the edits to the current
17088 line of input and proceed as for case CE. */
17090 static const struct asm_opcode
*
17091 opcode_lookup (char **str
)
17095 const struct asm_opcode
*opcode
;
17096 const struct asm_cond
*cond
;
17099 /* Scan up to the end of the mnemonic, which must end in white space,
17100 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17101 for (base
= end
= *str
; *end
!= '\0'; end
++)
17102 if (*end
== ' ' || *end
== '.')
17108 /* Handle a possible width suffix and/or Neon type suffix. */
17113 /* The .w and .n suffixes are only valid if the unified syntax is in
17115 if (unified_syntax
&& end
[1] == 'w')
17117 else if (unified_syntax
&& end
[1] == 'n')
17122 inst
.vectype
.elems
= 0;
17124 *str
= end
+ offset
;
17126 if (end
[offset
] == '.')
17128 /* See if we have a Neon type suffix (possible in either unified or
17129 non-unified ARM syntax mode). */
17130 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
17133 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
17139 /* Look for unaffixed or special-case affixed mnemonic. */
17140 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17145 if (opcode
->tag
< OT_odd_infix_0
)
17147 inst
.cond
= COND_ALWAYS
;
17151 if (warn_on_deprecated
&& unified_syntax
)
17152 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17153 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
17154 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17157 inst
.cond
= cond
->value
;
17161 /* Cannot have a conditional suffix on a mnemonic of less than two
17163 if (end
- base
< 3)
17166 /* Look for suffixed mnemonic. */
17168 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17169 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17171 if (opcode
&& cond
)
17174 switch (opcode
->tag
)
17176 case OT_cinfix3_legacy
:
17177 /* Ignore conditional suffixes matched on infix only mnemonics. */
17181 case OT_cinfix3_deprecated
:
17182 case OT_odd_infix_unc
:
17183 if (!unified_syntax
)
17185 /* else fall through */
17189 case OT_csuf_or_in3
:
17190 inst
.cond
= cond
->value
;
17193 case OT_unconditional
:
17194 case OT_unconditionalF
:
17196 inst
.cond
= cond
->value
;
17199 /* Delayed diagnostic. */
17200 inst
.error
= BAD_COND
;
17201 inst
.cond
= COND_ALWAYS
;
17210 /* Cannot have a usual-position infix on a mnemonic of less than
17211 six characters (five would be a suffix). */
17212 if (end
- base
< 6)
17215 /* Look for infixed mnemonic in the usual position. */
17217 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17221 memcpy (save
, affix
, 2);
17222 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
17223 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17225 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
17226 memcpy (affix
, save
, 2);
17229 && (opcode
->tag
== OT_cinfix3
17230 || opcode
->tag
== OT_cinfix3_deprecated
17231 || opcode
->tag
== OT_csuf_or_in3
17232 || opcode
->tag
== OT_cinfix3_legacy
))
17235 if (warn_on_deprecated
&& unified_syntax
17236 && (opcode
->tag
== OT_cinfix3
17237 || opcode
->tag
== OT_cinfix3_deprecated
))
17238 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17240 inst
.cond
= cond
->value
;
17247 /* This function generates an initial IT instruction, leaving its block
17248 virtually open for the new instructions. Eventually,
17249 the mask will be updated by now_it_add_mask () each time
17250 a new instruction needs to be included in the IT block.
17251 Finally, the block is closed with close_automatic_it_block ().
17252 The block closure can be requested either from md_assemble (),
17253 a tencode (), or due to a label hook. */
17256 new_automatic_it_block (int cond
)
17258 now_it
.state
= AUTOMATIC_IT_BLOCK
;
17259 now_it
.mask
= 0x18;
17261 now_it
.block_length
= 1;
17262 mapping_state (MAP_THUMB
);
17263 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
17264 now_it
.warn_deprecated
= FALSE
;
17265 now_it
.insn_cond
= TRUE
;
17268 /* Close an automatic IT block.
17269 See comments in new_automatic_it_block (). */
17272 close_automatic_it_block (void)
17274 now_it
.mask
= 0x10;
17275 now_it
.block_length
= 0;
17278 /* Update the mask of the current automatically-generated IT
17279 instruction. See comments in new_automatic_it_block (). */
17282 now_it_add_mask (int cond
)
17284 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17285 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17286 | ((bitvalue) << (nbit)))
17287 const int resulting_bit
= (cond
& 1);
17289 now_it
.mask
&= 0xf;
17290 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17292 (5 - now_it
.block_length
));
17293 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17295 ((5 - now_it
.block_length
) - 1) );
17296 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
17299 #undef SET_BIT_VALUE
17302 /* The IT blocks handling machinery is accessed through the these functions:
17303 it_fsm_pre_encode () from md_assemble ()
17304 set_it_insn_type () optional, from the tencode functions
17305 set_it_insn_type_last () ditto
17306 in_it_block () ditto
17307 it_fsm_post_encode () from md_assemble ()
17308 force_automatic_it_block_close () from label handling functions
17311 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17312 initializing the IT insn type with a generic initial value depending
17313 on the inst.condition.
17314 2) During the tencode function, two things may happen:
17315 a) The tencode function overrides the IT insn type by
17316 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17317 b) The tencode function queries the IT block state by
17318 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17320 Both set_it_insn_type and in_it_block run the internal FSM state
17321 handling function (handle_it_state), because: a) setting the IT insn
17322 type may incur in an invalid state (exiting the function),
17323 and b) querying the state requires the FSM to be updated.
17324 Specifically we want to avoid creating an IT block for conditional
17325 branches, so it_fsm_pre_encode is actually a guess and we can't
17326 determine whether an IT block is required until the tencode () routine
17327 has decided what type of instruction this actually is.
17328 Because of this, if set_it_insn_type and in_it_block have to be used,
17329 set_it_insn_type has to be called first.
17331 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17332 determines the insn IT type depending on the inst.cond code.
17333 When a tencode () routine encodes an instruction that can be
17334 either outside an IT block, or, in the case of being inside, has to be
17335 the last one, set_it_insn_type_last () will determine the proper
17336 IT instruction type based on the inst.cond code. Otherwise,
17337 set_it_insn_type can be called for overriding that logic or
17338 for covering other cases.
17340 Calling handle_it_state () may not transition the IT block state to
17341 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17342 still queried. Instead, if the FSM determines that the state should
17343 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17344 after the tencode () function: that's what it_fsm_post_encode () does.
17346 Since in_it_block () calls the state handling function to get an
17347 updated state, an error may occur (due to invalid insns combination).
17348 In that case, inst.error is set.
17349 Therefore, inst.error has to be checked after the execution of
17350 the tencode () routine.
17352 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17353 any pending state change (if any) that didn't take place in
17354 handle_it_state () as explained above. */
17357 it_fsm_pre_encode (void)
17359 if (inst
.cond
!= COND_ALWAYS
)
17360 inst
.it_insn_type
= INSIDE_IT_INSN
;
17362 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
17364 now_it
.state_handled
= 0;
17367 /* IT state FSM handling function. */
17370 handle_it_state (void)
17372 now_it
.state_handled
= 1;
17373 now_it
.insn_cond
= FALSE
;
17375 switch (now_it
.state
)
17377 case OUTSIDE_IT_BLOCK
:
17378 switch (inst
.it_insn_type
)
17380 case OUTSIDE_IT_INSN
:
17383 case INSIDE_IT_INSN
:
17384 case INSIDE_IT_LAST_INSN
:
17385 if (thumb_mode
== 0)
17388 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
17389 as_tsktsk (_("Warning: conditional outside an IT block"\
17394 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
17395 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_arch_t2
))
17397 /* Automatically generate the IT instruction. */
17398 new_automatic_it_block (inst
.cond
);
17399 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
17400 close_automatic_it_block ();
17404 inst
.error
= BAD_OUT_IT
;
17410 case IF_INSIDE_IT_LAST_INSN
:
17411 case NEUTRAL_IT_INSN
:
17415 now_it
.state
= MANUAL_IT_BLOCK
;
17416 now_it
.block_length
= 0;
17421 case AUTOMATIC_IT_BLOCK
:
17422 /* Three things may happen now:
17423 a) We should increment current it block size;
17424 b) We should close current it block (closing insn or 4 insns);
17425 c) We should close current it block and start a new one (due
17426 to incompatible conditions or
17427 4 insns-length block reached). */
17429 switch (inst
.it_insn_type
)
17431 case OUTSIDE_IT_INSN
:
17432 /* The closure of the block shall happen immediatelly,
17433 so any in_it_block () call reports the block as closed. */
17434 force_automatic_it_block_close ();
17437 case INSIDE_IT_INSN
:
17438 case INSIDE_IT_LAST_INSN
:
17439 case IF_INSIDE_IT_LAST_INSN
:
17440 now_it
.block_length
++;
17442 if (now_it
.block_length
> 4
17443 || !now_it_compatible (inst
.cond
))
17445 force_automatic_it_block_close ();
17446 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
17447 new_automatic_it_block (inst
.cond
);
17451 now_it
.insn_cond
= TRUE
;
17452 now_it_add_mask (inst
.cond
);
17455 if (now_it
.state
== AUTOMATIC_IT_BLOCK
17456 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
17457 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
17458 close_automatic_it_block ();
17461 case NEUTRAL_IT_INSN
:
17462 now_it
.block_length
++;
17463 now_it
.insn_cond
= TRUE
;
17465 if (now_it
.block_length
> 4)
17466 force_automatic_it_block_close ();
17468 now_it_add_mask (now_it
.cc
& 1);
17472 close_automatic_it_block ();
17473 now_it
.state
= MANUAL_IT_BLOCK
;
17478 case MANUAL_IT_BLOCK
:
17480 /* Check conditional suffixes. */
17481 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
17484 now_it
.mask
&= 0x1f;
17485 is_last
= (now_it
.mask
== 0x10);
17486 now_it
.insn_cond
= TRUE
;
17488 switch (inst
.it_insn_type
)
17490 case OUTSIDE_IT_INSN
:
17491 inst
.error
= BAD_NOT_IT
;
17494 case INSIDE_IT_INSN
:
17495 if (cond
!= inst
.cond
)
17497 inst
.error
= BAD_IT_COND
;
17502 case INSIDE_IT_LAST_INSN
:
17503 case IF_INSIDE_IT_LAST_INSN
:
17504 if (cond
!= inst
.cond
)
17506 inst
.error
= BAD_IT_COND
;
17511 inst
.error
= BAD_BRANCH
;
17516 case NEUTRAL_IT_INSN
:
17517 /* The BKPT instruction is unconditional even in an IT block. */
17521 inst
.error
= BAD_IT_IT
;
17531 struct depr_insn_mask
17533 unsigned long pattern
;
17534 unsigned long mask
;
17535 const char* description
;
17538 /* List of 16-bit instruction patterns deprecated in an IT block in
17540 static const struct depr_insn_mask depr_it_insns
[] = {
17541 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17542 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17543 { 0xa000, 0xb800, N_("ADR") },
17544 { 0x4800, 0xf800, N_("Literal loads") },
17545 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17546 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17547 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
17548 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
17549 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
17554 it_fsm_post_encode (void)
17558 if (!now_it
.state_handled
)
17559 handle_it_state ();
17561 if (now_it
.insn_cond
17562 && !now_it
.warn_deprecated
17563 && warn_on_deprecated
17564 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
17566 if (inst
.instruction
>= 0x10000)
17568 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
17569 "deprecated in ARMv8"));
17570 now_it
.warn_deprecated
= TRUE
;
17574 const struct depr_insn_mask
*p
= depr_it_insns
;
17576 while (p
->mask
!= 0)
17578 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
17580 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
17581 "of the following class are deprecated in ARMv8: "
17582 "%s"), p
->description
);
17583 now_it
.warn_deprecated
= TRUE
;
17591 if (now_it
.block_length
> 1)
17593 as_tsktsk (_("IT blocks containing more than one conditional "
17594 "instruction are deprecated in ARMv8"));
17595 now_it
.warn_deprecated
= TRUE
;
17599 is_last
= (now_it
.mask
== 0x10);
17602 now_it
.state
= OUTSIDE_IT_BLOCK
;
17608 force_automatic_it_block_close (void)
17610 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
17612 close_automatic_it_block ();
17613 now_it
.state
= OUTSIDE_IT_BLOCK
;
17621 if (!now_it
.state_handled
)
17622 handle_it_state ();
17624 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
17628 md_assemble (char *str
)
17631 const struct asm_opcode
* opcode
;
17633 /* Align the previous label if needed. */
17634 if (last_label_seen
!= NULL
)
17636 symbol_set_frag (last_label_seen
, frag_now
);
17637 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
17638 S_SET_SEGMENT (last_label_seen
, now_seg
);
17641 memset (&inst
, '\0', sizeof (inst
));
17642 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
17644 opcode
= opcode_lookup (&p
);
17647 /* It wasn't an instruction, but it might be a register alias of
17648 the form alias .req reg, or a Neon .dn/.qn directive. */
17649 if (! create_register_alias (str
, p
)
17650 && ! create_neon_reg_alias (str
, p
))
17651 as_bad (_("bad instruction `%s'"), str
);
17656 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
17657 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
17659 /* The value which unconditional instructions should have in place of the
17660 condition field. */
17661 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
17665 arm_feature_set variant
;
17667 variant
= cpu_variant
;
17668 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
17669 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
17670 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
17671 /* Check that this instruction is supported for this CPU. */
17672 if (!opcode
->tvariant
17673 || (thumb_mode
== 1
17674 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
17676 as_bad (_("selected processor does not support Thumb mode `%s'"), str
);
17679 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
17680 && opcode
->tencode
!= do_t_branch
)
17682 as_bad (_("Thumb does not support conditional execution"));
17686 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
))
17688 if (opcode
->tencode
!= do_t_blx
&& opcode
->tencode
!= do_t_branch23
17689 && !(ARM_CPU_HAS_FEATURE(*opcode
->tvariant
, arm_ext_msr
)
17690 || ARM_CPU_HAS_FEATURE(*opcode
->tvariant
, arm_ext_barrier
)))
17692 /* Two things are addressed here.
17693 1) Implicit require narrow instructions on Thumb-1.
17694 This avoids relaxation accidentally introducing Thumb-2
17696 2) Reject wide instructions in non Thumb-2 cores. */
17697 if (inst
.size_req
== 0)
17699 else if (inst
.size_req
== 4)
17701 as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str
);
17707 inst
.instruction
= opcode
->tvalue
;
17709 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
17711 /* Prepare the it_insn_type for those encodings that don't set
17713 it_fsm_pre_encode ();
17715 opcode
->tencode ();
17717 it_fsm_post_encode ();
17720 if (!(inst
.error
|| inst
.relax
))
17722 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
17723 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
17724 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
17726 as_bad (_("cannot honor width suffix -- `%s'"), str
);
17731 /* Something has gone badly wrong if we try to relax a fixed size
17733 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
17735 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
17736 *opcode
->tvariant
);
17737 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
17738 set those bits when Thumb-2 32-bit instructions are seen. ie.
17739 anything other than bl/blx and v6-M instructions.
17740 The impact of relaxable instructions will be considered later after we
17741 finish all relaxation. */
17742 if ((inst
.size
== 4 && (inst
.instruction
& 0xf800e800) != 0xf000e800)
17743 && !(ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
17744 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
)))
17745 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
17748 check_neon_suffixes
;
17752 mapping_state (MAP_THUMB
);
17755 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
17759 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
17760 is_bx
= (opcode
->aencode
== do_bx
);
17762 /* Check that this instruction is supported for this CPU. */
17763 if (!(is_bx
&& fix_v4bx
)
17764 && !(opcode
->avariant
&&
17765 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
17767 as_bad (_("selected processor does not support ARM mode `%s'"), str
);
17772 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
17776 inst
.instruction
= opcode
->avalue
;
17777 if (opcode
->tag
== OT_unconditionalF
)
17778 inst
.instruction
|= 0xF << 28;
17780 inst
.instruction
|= inst
.cond
<< 28;
17781 inst
.size
= INSN_SIZE
;
17782 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
17784 it_fsm_pre_encode ();
17785 opcode
->aencode ();
17786 it_fsm_post_encode ();
17788 /* Arm mode bx is marked as both v4T and v5 because it's still required
17789 on a hypothetical non-thumb v5 core. */
17791 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
17793 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
17794 *opcode
->avariant
);
17796 check_neon_suffixes
;
17800 mapping_state (MAP_ARM
);
17805 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
17813 check_it_blocks_finished (void)
17818 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
17819 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
17820 == MANUAL_IT_BLOCK
)
17822 as_warn (_("section '%s' finished with an open IT block."),
17826 if (now_it
.state
== MANUAL_IT_BLOCK
)
17827 as_warn (_("file finished with an open IT block."));
17831 /* Various frobbings of labels and their addresses. */
17834 arm_start_line_hook (void)
17836 last_label_seen
= NULL
;
17840 arm_frob_label (symbolS
* sym
)
17842 last_label_seen
= sym
;
17844 ARM_SET_THUMB (sym
, thumb_mode
);
17846 #if defined OBJ_COFF || defined OBJ_ELF
17847 ARM_SET_INTERWORK (sym
, support_interwork
);
17850 force_automatic_it_block_close ();
17852 /* Note - do not allow local symbols (.Lxxx) to be labelled
17853 as Thumb functions. This is because these labels, whilst
17854 they exist inside Thumb code, are not the entry points for
17855 possible ARM->Thumb calls. Also, these labels can be used
17856 as part of a computed goto or switch statement. eg gcc
17857 can generate code that looks like this:
17859 ldr r2, [pc, .Laaa]
17869 The first instruction loads the address of the jump table.
17870 The second instruction converts a table index into a byte offset.
17871 The third instruction gets the jump address out of the table.
17872 The fourth instruction performs the jump.
17874 If the address stored at .Laaa is that of a symbol which has the
17875 Thumb_Func bit set, then the linker will arrange for this address
17876 to have the bottom bit set, which in turn would mean that the
17877 address computation performed by the third instruction would end
17878 up with the bottom bit set. Since the ARM is capable of unaligned
17879 word loads, the instruction would then load the incorrect address
17880 out of the jump table, and chaos would ensue. */
17881 if (label_is_thumb_function_name
17882 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
17883 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
17885 /* When the address of a Thumb function is taken the bottom
17886 bit of that address should be set. This will allow
17887 interworking between Arm and Thumb functions to work
17890 THUMB_SET_FUNC (sym
, 1);
17892 label_is_thumb_function_name
= FALSE
;
17895 dwarf2_emit_label (sym
);
17899 arm_data_in_code (void)
17901 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
17903 *input_line_pointer
= '/';
17904 input_line_pointer
+= 5;
17905 *input_line_pointer
= 0;
17913 arm_canonicalize_symbol_name (char * name
)
17917 if (thumb_mode
&& (len
= strlen (name
)) > 5
17918 && streq (name
+ len
- 5, "/data"))
17919 *(name
+ len
- 5) = 0;
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.	Some registers
   also have mixed-case names.	*/

/* REGDEF makes one reg_entry; REGNUM/REGNUM2 derive the name from a
   prefix and number (REGNUM2 doubles the number, for Neon Q regs which
   alias D-register pairs); REGSET/REGSETH/REGSET2 expand to runs of 16;
   SPLRBANK emits the banked LR/SP/SPSR triple for one processor mode.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
17955 static const struct reg_entry reg_names
[] =
17957 /* ARM integer registers. */
17958 REGSET(r
, RN
), REGSET(R
, RN
),
17960 /* ATPCS synonyms. */
17961 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
17962 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
17963 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
17965 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
17966 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
17967 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
17969 /* Well-known aliases. */
17970 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
17971 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
17973 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
17974 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
17976 /* Coprocessor numbers. */
17977 REGSET(p
, CP
), REGSET(P
, CP
),
17979 /* Coprocessor register numbers. The "cr" variants are for backward
17981 REGSET(c
, CN
), REGSET(C
, CN
),
17982 REGSET(cr
, CN
), REGSET(CR
, CN
),
17984 /* ARM banked registers. */
17985 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
17986 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
17987 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
17988 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
17989 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
17990 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
17991 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
17993 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
17994 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
17995 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
17996 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
17997 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
17998 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
17999 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
18000 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
18002 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
18003 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
18004 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
18005 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
18006 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
18007 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
18008 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
18009 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18010 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18012 /* FPA registers. */
18013 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
18014 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
18016 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
18017 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
18019 /* VFP SP registers. */
18020 REGSET(s
,VFS
), REGSET(S
,VFS
),
18021 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
18023 /* VFP DP Registers. */
18024 REGSET(d
,VFD
), REGSET(D
,VFD
),
18025 /* Extra Neon DP registers. */
18026 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
18028 /* Neon QP registers. */
18029 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
18031 /* VFP control registers. */
18032 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
18033 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
18034 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
18035 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
18036 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
18037 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
18039 /* Maverick DSP coprocessor registers. */
18040 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
18041 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
18043 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
18044 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
18045 REGDEF(dspsc
,0,DSPSC
),
18047 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
18048 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
18049 REGDEF(DSPSC
,0,DSPSC
),
18051 /* iWMMXt data registers - p0, c0-15. */
18052 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
18054 /* iWMMXt control registers - p1, c0-3. */
18055 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
18056 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
18057 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
18058 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
18060 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18061 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
18062 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
18063 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
18064 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
18066 /* XScale accumulator registers. */
18067 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
18073 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18074 within psr_required_here. */
18075 static const struct asm_psr psrs
[] =
18077 /* Backward compatibility notation. Note that "all" is no longer
18078 truly all possible PSR bits. */
18079 {"all", PSR_c
| PSR_f
},
18083 /* Individual flags. */
18089 /* Combinations of flags. */
18090 {"fs", PSR_f
| PSR_s
},
18091 {"fx", PSR_f
| PSR_x
},
18092 {"fc", PSR_f
| PSR_c
},
18093 {"sf", PSR_s
| PSR_f
},
18094 {"sx", PSR_s
| PSR_x
},
18095 {"sc", PSR_s
| PSR_c
},
18096 {"xf", PSR_x
| PSR_f
},
18097 {"xs", PSR_x
| PSR_s
},
18098 {"xc", PSR_x
| PSR_c
},
18099 {"cf", PSR_c
| PSR_f
},
18100 {"cs", PSR_c
| PSR_s
},
18101 {"cx", PSR_c
| PSR_x
},
18102 {"fsx", PSR_f
| PSR_s
| PSR_x
},
18103 {"fsc", PSR_f
| PSR_s
| PSR_c
},
18104 {"fxs", PSR_f
| PSR_x
| PSR_s
},
18105 {"fxc", PSR_f
| PSR_x
| PSR_c
},
18106 {"fcs", PSR_f
| PSR_c
| PSR_s
},
18107 {"fcx", PSR_f
| PSR_c
| PSR_x
},
18108 {"sfx", PSR_s
| PSR_f
| PSR_x
},
18109 {"sfc", PSR_s
| PSR_f
| PSR_c
},
18110 {"sxf", PSR_s
| PSR_x
| PSR_f
},
18111 {"sxc", PSR_s
| PSR_x
| PSR_c
},
18112 {"scf", PSR_s
| PSR_c
| PSR_f
},
18113 {"scx", PSR_s
| PSR_c
| PSR_x
},
18114 {"xfs", PSR_x
| PSR_f
| PSR_s
},
18115 {"xfc", PSR_x
| PSR_f
| PSR_c
},
18116 {"xsf", PSR_x
| PSR_s
| PSR_f
},
18117 {"xsc", PSR_x
| PSR_s
| PSR_c
},
18118 {"xcf", PSR_x
| PSR_c
| PSR_f
},
18119 {"xcs", PSR_x
| PSR_c
| PSR_s
},
18120 {"cfs", PSR_c
| PSR_f
| PSR_s
},
18121 {"cfx", PSR_c
| PSR_f
| PSR_x
},
18122 {"csf", PSR_c
| PSR_s
| PSR_f
},
18123 {"csx", PSR_c
| PSR_s
| PSR_x
},
18124 {"cxf", PSR_c
| PSR_x
| PSR_f
},
18125 {"cxs", PSR_c
| PSR_x
| PSR_s
},
18126 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
18127 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
18128 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
18129 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
18130 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
18131 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
18132 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
18133 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
18134 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
18135 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
18136 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
18137 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
18138 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
18139 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
18140 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
18141 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
18142 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
18143 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
18144 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
18145 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
18146 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
18147 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
18148 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
18149 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
18152 /* Table of V7M psr names. */
18153 static const struct asm_psr v7m_psrs
[] =
18155 {"apsr", 0 }, {"APSR", 0 },
18156 {"iapsr", 1 }, {"IAPSR", 1 },
18157 {"eapsr", 2 }, {"EAPSR", 2 },
18158 {"psr", 3 }, {"PSR", 3 },
18159 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
18160 {"ipsr", 5 }, {"IPSR", 5 },
18161 {"epsr", 6 }, {"EPSR", 6 },
18162 {"iepsr", 7 }, {"IEPSR", 7 },
18163 {"msp", 8 }, {"MSP", 8 },
18164 {"psp", 9 }, {"PSP", 9 },
18165 {"primask", 16}, {"PRIMASK", 16},
18166 {"basepri", 17}, {"BASEPRI", 17},
18167 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
18168 {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
18169 {"faultmask", 19}, {"FAULTMASK", 19},
18170 {"control", 20}, {"CONTROL", 20}
18173 /* Table of all shift-in-operand names. */
18174 static const struct asm_shift_name shift_names
[] =
18176 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
18177 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
18178 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
18179 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
18180 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
18181 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
18184 /* Table of all explicit relocation names. */
18186 static struct reloc_entry reloc_names
[] =
18188 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
18189 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
18190 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
18191 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
18192 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
18193 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
18194 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
18195 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
18196 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
18197 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
18198 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
18199 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
18200 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
18201 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
18202 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
18203 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
18204 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
18205 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
}
18209 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
18210 static const struct asm_cond conds
[] =
18214 {"cs", 0x2}, {"hs", 0x2},
18215 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
18229 #define UL_BARRIER(L,U,CODE,FEAT) \
18230 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18231 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
18233 static struct asm_barrier_opt barrier_opt_names
[] =
18235 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
18236 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
18237 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
18238 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
18239 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
18240 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
18241 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
18242 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
18243 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
18244 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
18245 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
18246 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
18247 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
18248 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
18249 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
18250 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
18255 /* Table of ARM-format instructions. */
18257 /* Macros for gluing together operand strings. N.B. In all cases
18258 other than OPS0, the trailing OP_stop comes from default
18259 zero-initialization of the unspecified elements of the array. */
18260 #define OPS0() { OP_stop, }
18261 #define OPS1(a) { OP_##a, }
18262 #define OPS2(a,b) { OP_##a,OP_##b, }
18263 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
18264 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
18265 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
18266 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
18268 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
18269 This is useful when mixing operands for ARM and THUMB, i.e. using the
18270 MIX_ARM_THUMB_OPERANDS macro.
18271 In order to use these macros, prefix the number of operands with _
18273 #define OPS_1(a) { a, }
18274 #define OPS_2(a,b) { a,b, }
18275 #define OPS_3(a,b,c) { a,b,c, }
18276 #define OPS_4(a,b,c,d) { a,b,c,d, }
18277 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
18278 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18280 /* These macros abstract out the exact format of the mnemonic table and
18281 save some repeated characters. */
18283 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
18284 #define TxCE(mnem, op, top, nops, ops, ae, te) \
18285 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
18286 THUMB_VARIANT, do_##ae, do_##te }
18288 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
18289 a T_MNEM_xyz enumerator. */
18290 #define TCE(mnem, aop, top, nops, ops, ae, te) \
18291 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
18292 #define tCE(mnem, aop, top, nops, ops, ae, te) \
18293 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18295 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
18296 infix after the third character. */
18297 #define TxC3(mnem, op, top, nops, ops, ae, te) \
18298 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
18299 THUMB_VARIANT, do_##ae, do_##te }
18300 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
18301 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
18302 THUMB_VARIANT, do_##ae, do_##te }
18303 #define TC3(mnem, aop, top, nops, ops, ae, te) \
18304 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
18305 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
18306 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
18307 #define tC3(mnem, aop, top, nops, ops, ae, te) \
18308 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18309 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
18310 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18312 /* Mnemonic that cannot be conditionalized. The ARM condition-code
18313 field is still 0xE. Many of the Thumb variants can be executed
18314 conditionally, so this is checked separately. */
18315 #define TUE(mnem, op, top, nops, ops, ae, te) \
18316 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18317 THUMB_VARIANT, do_##ae, do_##te }
18319 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
18320 Used by mnemonics that have very minimal differences in the encoding for
18321 ARM and Thumb variants and can be handled in a common function. */
18322 #define TUEc(mnem, op, top, nops, ops, en) \
18323 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18324 THUMB_VARIANT, do_##en, do_##en }
18326 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
18327 condition code field. */
18328 #define TUF(mnem, op, top, nops, ops, ae, te) \
18329 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
18330 THUMB_VARIANT, do_##ae, do_##te }
18332 /* ARM-only variants of all the above. */
18333 #define CE(mnem, op, nops, ops, ae) \
18334 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18336 #define C3(mnem, op, nops, ops, ae) \
18337 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18339 /* Legacy mnemonics that always have conditional infix after the third
18341 #define CL(mnem, op, nops, ops, ae) \
18342 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
18343 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18345 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
18346 #define cCE(mnem, op, nops, ops, ae) \
18347 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18349 /* Legacy coprocessor instructions where conditional infix and conditional
18350 suffix are ambiguous. For consistency this includes all FPA instructions,
18351 not just the potentially ambiguous ones. */
18352 #define cCL(mnem, op, nops, ops, ae) \
18353 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
18354 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18356 /* Coprocessor, takes either a suffix or a position-3 infix
18357 (for an FPA corner case). */
18358 #define C3E(mnem, op, nops, ops, ae) \
18359 { mnem, OPS##nops ops, OT_csuf_or_in3, \
18360 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18362 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
18363 { m1 #m2 m3, OPS##nops ops, \
18364 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
18365 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
/* Expand to one xCM_ entry per condition code, starting with the bare
   (unconditional) form.  Includes the legacy synonyms hs/cs, and
   ul/lo for cc.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)
/* Unconditional (OT_unconditional) ARM-only instruction; mnemonic is
   given unquoted and stringized.  No Thumb encoding.  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
/* Unconditional (OT_unconditionalF) ARM-only instruction; mnemonic is
   given unquoted and stringized.  No Thumb encoding.  */
#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes: the opcode fields hold
   N_MNEM<op> enumerators rather than literal encodings.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types; 'tag' supplies the operand-tag (OT_*) value.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }
/* Conditional-suffix form.  */
#define NCE(mnem, op, nops, ops, enc) \
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
/* Conditional-suffix form taking the "F" tag variant (OT_csuffixF).  */
#define NCEF(mnem, op, nops, ops, enc) \
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
/* Neon insn with conditional suffix for the ARM version, overloaded types:
   opcode fields hold N_MNEM<op> enumerators; 'tag' supplies the
   operand-tag (OT_*) value.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
/* Overloaded-type, conditional-suffix form.  */
#define nCE(mnem, op, nops, ops, enc) \
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
/* Overloaded-type, conditional-suffix form with the "F" tag (OT_csuffixF).  */
#define nCEF(mnem, op, nops, ops, enc) \
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18432 static const struct asm_opcode insns
[] =
18434 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18435 #define THUMB_VARIANT & arm_ext_v4t
18436 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18437 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18438 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18439 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18440 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18441 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18442 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18443 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18444 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18445 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18446 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18447 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18448 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18449 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18450 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18451 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18453 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18454 for setting PSR flag bits. They are obsolete in V6 and do not
18455 have Thumb equivalents. */
18456 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18457 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18458 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
18459 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18460 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18461 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
18462 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18463 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18464 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
18466 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
18467 tC3("movs", 1b00000
, _movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
18468 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18469 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18471 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
18472 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18473 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
18475 OP_ADDRGLDR
),ldst
, t_ldst
),
18476 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18478 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18479 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18480 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18481 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18482 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18483 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18485 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18486 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18487 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
18488 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
18491 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
18492 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
18493 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
18494 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
18496 /* Thumb-compatibility pseudo ops. */
18497 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18498 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18499 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18500 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18501 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18502 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18503 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18504 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18505 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
18506 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
18507 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
18508 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
18510 /* These may simplify to neg. */
18511 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
18512 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
18514 #undef THUMB_VARIANT
18515 #define THUMB_VARIANT & arm_ext_v6
18517 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
18519 /* V1 instructions with no Thumb analogue prior to V6T2. */
18520 #undef THUMB_VARIANT
18521 #define THUMB_VARIANT & arm_ext_v6t2
18523 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18524 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18525 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
18527 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18528 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18529 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
18530 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18532 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18533 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18535 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18536 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18538 /* V1 instructions with no Thumb analogue at all. */
18539 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
18540 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
18542 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
18543 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
18544 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
18545 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
18546 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
18547 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
18548 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
18549 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
18552 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18553 #undef THUMB_VARIANT
18554 #define THUMB_VARIANT & arm_ext_v4t
18556 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
18557 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
18559 #undef THUMB_VARIANT
18560 #define THUMB_VARIANT & arm_ext_v6t2
18562 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
18563 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
18565 /* Generic coprocessor instructions. */
18566 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
18567 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18568 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18569 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18570 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18571 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18572 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18575 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18577 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
18578 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
18581 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18582 #undef THUMB_VARIANT
18583 #define THUMB_VARIANT & arm_ext_msr
18585 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
18586 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
18589 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18590 #undef THUMB_VARIANT
18591 #define THUMB_VARIANT & arm_ext_v6t2
18593 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18594 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18595 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18596 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18597 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18598 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18599 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18600 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18603 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18604 #undef THUMB_VARIANT
18605 #define THUMB_VARIANT & arm_ext_v4t
18607 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18608 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18609 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18610 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18611 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18612 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18615 #define ARM_VARIANT & arm_ext_v4t_5
18617 /* ARM Architecture 4T. */
18618 /* Note: bx (and blx) are required on V5, even if the processor does
18619 not support Thumb. */
18620 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
18623 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18624 #undef THUMB_VARIANT
18625 #define THUMB_VARIANT & arm_ext_v5t
18627 /* Note: blx has 2 variants; the .value coded here is for
18628 BLX(2). Only this variant has conditional execution. */
18629 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
18630 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
18632 #undef THUMB_VARIANT
18633 #define THUMB_VARIANT & arm_ext_v6t2
18635 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
18636 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18637 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18638 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18639 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18640 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
18641 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18642 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18645 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18646 #undef THUMB_VARIANT
18647 #define THUMB_VARIANT & arm_ext_v5exp
18649 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18650 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18651 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18652 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18654 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18655 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18657 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18658 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18659 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18660 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18662 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18663 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18664 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18665 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18667 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18668 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18670 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18671 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18672 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18673 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18676 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18677 #undef THUMB_VARIANT
18678 #define THUMB_VARIANT & arm_ext_v6t2
18680 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
18681 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
18683 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
18684 ADDRGLDRS
), ldrd
, t_ldstd
),
18686 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18687 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18690 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18692 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
18695 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18696 #undef THUMB_VARIANT
18697 #define THUMB_VARIANT & arm_ext_v6
18699 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
18700 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
18701 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18702 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18703 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18704 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18705 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18706 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18707 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18708 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
18710 #undef THUMB_VARIANT
18711 #define THUMB_VARIANT & arm_ext_v6t2
18713 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
18714 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
18716 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18717 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18719 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
18720 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
18722 /* ARM V6 not included in V7M. */
18723 #undef THUMB_VARIANT
18724 #define THUMB_VARIANT & arm_ext_v6_notm
18725 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18726 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18727 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
18728 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
18729 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
18730 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18731 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
18732 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
18733 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
18734 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18735 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18736 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18737 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
18738 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
18739 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
18740 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
18741 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
18742 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
18744 /* ARM V6 not included in V7M (eg. integer SIMD). */
18745 #undef THUMB_VARIANT
18746 #define THUMB_VARIANT & arm_ext_v6_dsp
18747 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
18748 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
18749 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
18750 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18751 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18752 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18753 /* Old name for QASX. */
18754 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18755 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18756 /* Old name for QSAX. */
18757 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18758 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18759 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18760 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18761 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18762 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18763 /* Old name for SASX. */
18764 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18765 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18766 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18767 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18768 /* Old name for SHASX. */
18769 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18770 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18771 /* Old name for SHSAX. */
18772 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18773 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18774 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18775 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18776 /* Old name for SSAX. */
18777 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18778 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18779 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18780 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18781 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18782 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18783 /* Old name for UASX. */
18784 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18785 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18786 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18787 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18788 /* Old name for UHASX. */
18789 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18790 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18791 /* Old name for UHSAX. */
18792 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18793 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18794 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18795 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18796 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18797 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18798 /* Old name for UQASX. */
18799 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18800 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18801 /* Old name for UQSAX. */
18802 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18803 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18804 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18805 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18806 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18807 /* Old name for USAX. */
18808 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18809 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18810 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
18811 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
18812 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
18813 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18814 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
18815 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
18816 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
18817 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18818 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
18819 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
18820 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
18821 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
18822 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
18823 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
18824 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
18825 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
18826 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
18827 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
18828 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
18829 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
18830 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
18831 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18832 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18833 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18834 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18835 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18836 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18837 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
18838 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
18839 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18840 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
18841 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
18844 #define ARM_VARIANT & arm_ext_v6k
18845 #undef THUMB_VARIANT
18846 #define THUMB_VARIANT & arm_ext_v6k
18848 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
18849 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
18850 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
18851 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
18853 #undef THUMB_VARIANT
18854 #define THUMB_VARIANT & arm_ext_v6_notm
18855 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
18857 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
18858 RRnpcb
), strexd
, t_strexd
),
18860 #undef THUMB_VARIANT
18861 #define THUMB_VARIANT & arm_ext_v6t2
18862 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
18864 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
18866 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
18868 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
18870 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
18873 #define ARM_VARIANT & arm_ext_sec
18874 #undef THUMB_VARIANT
18875 #define THUMB_VARIANT & arm_ext_sec
18877 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
18880 #define ARM_VARIANT & arm_ext_virt
18881 #undef THUMB_VARIANT
18882 #define THUMB_VARIANT & arm_ext_virt
18884 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
18885 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
18888 #define ARM_VARIANT & arm_ext_v6t2
18889 #undef THUMB_VARIANT
18890 #define THUMB_VARIANT & arm_ext_v6t2
18892 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
18893 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
18894 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
18895 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
18897 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
18898 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
18899 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
18900 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
18902 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
18903 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
18904 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
18905 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
18907 /* Thumb-only instructions. */
18909 #define ARM_VARIANT NULL
18910 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
18911 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
18913 /* ARM does not really have an IT instruction, so always allow it.
18914 The opcode is copied from Thumb in order to allow warnings in
18915 -mimplicit-it=[never | arm] modes. */
18917 #define ARM_VARIANT & arm_ext_v1
18919 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
18920 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
18921 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
18922 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
18923 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
18924 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
18925 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
18926 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
18927 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
18928 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
18929 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
18930 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
18931 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
18932 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
18933 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
18934 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
18935 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
18936 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
18938 /* Thumb2 only instructions. */
18940 #define ARM_VARIANT NULL
18942 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
18943 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
18944 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
18945 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
18946 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
18947 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
18949 /* Hardware division instructions. */
18951 #define ARM_VARIANT & arm_ext_adiv
18952 #undef THUMB_VARIANT
18953 #define THUMB_VARIANT & arm_ext_div
18955 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
18956 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
18958 /* ARM V6M/V7 instructions. */
18960 #define ARM_VARIANT & arm_ext_barrier
18961 #undef THUMB_VARIANT
18962 #define THUMB_VARIANT & arm_ext_barrier
18964 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
18965 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
18966 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
18968 /* ARM V7 instructions. */
18970 #define ARM_VARIANT & arm_ext_v7
18971 #undef THUMB_VARIANT
18972 #define THUMB_VARIANT & arm_ext_v7
18974 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
18975 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
18978 #define ARM_VARIANT & arm_ext_mp
18979 #undef THUMB_VARIANT
18980 #define THUMB_VARIANT & arm_ext_mp
18982 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
18984 /* AArchv8 instructions. */
18986 #define ARM_VARIANT & arm_ext_v8
18987 #undef THUMB_VARIANT
18988 #define THUMB_VARIANT & arm_ext_v8
18990 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
18991 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
18992 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
18993 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
18995 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
18996 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
18997 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
18999 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
19001 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19003 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19005 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19006 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19007 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19008 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19009 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19010 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19012 /* ARMv8 T32 only. */
19014 #define ARM_VARIANT NULL
19015 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
19016 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
19017 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
19019 /* FP for ARMv8. */
19021 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19022 #undef THUMB_VARIANT
19023 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19025 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19026 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19027 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19028 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19029 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19030 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19031 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
19032 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
19033 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
19034 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
19035 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
19036 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
19037 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
19038 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
19039 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
19040 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
19041 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
19043 /* Crypto v1 extensions. */
19045 #define ARM_VARIANT & fpu_crypto_ext_armv8
19046 #undef THUMB_VARIANT
19047 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19049 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
19050 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
19051 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
19052 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
19053 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
19054 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
19055 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
19056 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
19057 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
19058 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
19059 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
19060 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
19061 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
19062 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
19065 #define ARM_VARIANT & crc_ext_armv8
19066 #undef THUMB_VARIANT
19067 #define THUMB_VARIANT & crc_ext_armv8
19068 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
19069 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
19070 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
19071 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
19072 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
19073 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
19076 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19077 #undef THUMB_VARIANT
19078 #define THUMB_VARIANT NULL
19080 cCE("wfs", e200110
, 1, (RR
), rd
),
19081 cCE("rfs", e300110
, 1, (RR
), rd
),
19082 cCE("wfc", e400110
, 1, (RR
), rd
),
19083 cCE("rfc", e500110
, 1, (RR
), rd
),
19085 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19086 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19087 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19088 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19090 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19091 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19092 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19093 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19095 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
19096 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
19097 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
19098 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
19099 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
19100 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
19101 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
19102 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
19103 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
19104 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
19105 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
19106 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
19108 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
19109 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
19110 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
19111 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
19112 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
19113 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
19114 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
19115 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
19116 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
19117 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
19118 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
19119 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
19121 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
19122 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
19123 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
19124 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
19125 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
19126 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
19127 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
19128 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
19129 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
19130 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
19131 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
19132 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
19134 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
19135 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
19136 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
19137 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
19138 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
19139 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
19140 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
19141 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
19142 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
19143 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
19144 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
19145 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
19147 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
19148 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
19149 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
19150 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
19151 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
19152 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
19153 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
19154 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
19155 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
19156 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
19157 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
19158 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
19160 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
19161 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
19162 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
19163 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
19164 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
19165 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
19166 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
19167 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
19168 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
19169 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
19170 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
19171 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
19173 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
19174 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
19175 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
19176 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
19177 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
19178 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
19179 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
19180 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
19181 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
19182 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
19183 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
19184 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
19186 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
19187 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
19188 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
19189 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
19190 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
19191 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
19192 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
19193 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
19194 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
19195 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
19196 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
19197 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
19199 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
19200 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
19201 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
19202 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
19203 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
19204 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
19205 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
19206 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
19207 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
19208 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
19209 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
19210 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
19212 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
19213 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
19214 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
19215 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
19216 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
19217 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
19218 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
19219 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
19220 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
19221 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
19222 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
19223 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
19225 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
19226 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
19227 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
19228 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
19229 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
19230 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
19231 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
19232 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
19233 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
19234 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
19235 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
19236 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
19238 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
19239 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
19240 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
19241 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
19242 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
19243 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
19244 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
19245 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
19246 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
19247 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
19248 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
19249 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
19251 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
19252 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
19253 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
19254 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
19255 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
19256 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
19257 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
19258 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
19259 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
19260 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
19261 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
19262 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
19264 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
19265 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
19266 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
19267 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
19268 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
19269 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
19270 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
19271 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
19272 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
19273 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
19274 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
19275 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
19277 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
19278 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
19279 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
19280 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
19281 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
19282 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
19283 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
19284 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
19285 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
19286 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
19287 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
19288 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
19290 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
19291 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
19292 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
19293 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
19294 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
19295 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
19296 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
19297 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
19298 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
19299 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
19300 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
19301 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
19303 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19304 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19305 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19306 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19307 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19308 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19309 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19310 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19311 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19312 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19313 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19314 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19316 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19317 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19318 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19319 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19320 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19321 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19322 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19323 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19324 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19325 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19326 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19327 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19329 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19330 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19331 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19332 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19333 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19334 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19335 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19336 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19337 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19338 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19339 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19340 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19342 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19343 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19344 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19345 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19346 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19347 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19348 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19349 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19350 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19351 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19352 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19353 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19355 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19356 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19357 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19358 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19359 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19360 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19361 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19362 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19363 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19364 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19365 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19366 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19368 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19369 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19370 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19371 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19372 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19373 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19374 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19375 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19376 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19377 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19378 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19379 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19381 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19382 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19383 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19384 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19385 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19386 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19387 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19388 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19389 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19390 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19391 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19392 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19394 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19395 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19396 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19397 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19398 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19399 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19400 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19401 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19402 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19403 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19404 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19405 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19407 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19408 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19409 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19410 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19411 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19412 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19413 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19414 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19415 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19416 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19417 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19418 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19420 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19421 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19422 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19423 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19424 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19425 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19426 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19427 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19428 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19429 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19430 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19431 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19433 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19434 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19435 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19436 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19437 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19438 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19439 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19440 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19441 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19442 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19443 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19444 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19446 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19447 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19448 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19449 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19450 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19451 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19452 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19453 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19454 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19455 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19456 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19457 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19459 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19460 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19461 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19462 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19463 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19464 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19465 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19466 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19467 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19468 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19469 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19470 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19472 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19473 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19474 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19475 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19477 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
19478 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
19479 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
19480 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
19481 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
19482 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
19483 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
19484 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
19485 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
19486 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
19487 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
19488 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
19490 /* The implementation of the FIX instruction is broken on some
19491 assemblers, in that it accepts a precision specifier as well as a
19492 rounding specifier, despite the fact that this is meaningless.
19493 To be more compatible, we accept it as well, though of course it
19494 does not set any bits. */
19495 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
19496 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
19497 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
19498 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
19499 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
19500 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
19501 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
19502 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
19503 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
19504 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
19505 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
19506 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
19507 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
19509 /* Instructions that were new with the real FPA, call them V2. */
19511 #define ARM_VARIANT & fpu_fpa_ext_v2
19513 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19514 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19515 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19516 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19517 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19518 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19521 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19523 /* Moves and type conversions. */
19524 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19525 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
19526 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
19527 cCE("fmstat", ef1fa10
, 0, (), noargs
),
19528 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
19529 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
19530 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19531 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19532 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19533 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19534 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19535 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19536 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
19537 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
19539 /* Memory operations. */
19540 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
19541 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
19542 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19543 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19544 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19545 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19546 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19547 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19548 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19549 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19550 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19551 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19552 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19553 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19554 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19555 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19556 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19557 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19559 /* Monadic operations. */
19560 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19561 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19562 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19564 /* Dyadic operations. */
19565 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19566 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19567 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19568 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19569 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19570 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19571 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19572 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19573 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19576 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19577 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
19578 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19579 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
19581 /* Double precision load/store are still present on single precision
19582 implementations. */
19583 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
19584 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
19585 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19586 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19587 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19588 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19589 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19590 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19591 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19592 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19595 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19597 /* Moves and type conversions. */
19598 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19599 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19600 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19601 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
19602 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
19603 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
19604 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
19605 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19606 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19607 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19608 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19609 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19610 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19612 /* Monadic operations. */
19613 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19614 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19615 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19617 /* Dyadic operations. */
19618 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19619 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19620 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19621 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19622 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19623 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19624 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19625 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19626 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19629 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19630 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
19631 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19632 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
19635 #define ARM_VARIANT & fpu_vfp_ext_v2
19637 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
19638 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
19639 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
19640 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
19642 /* Instructions which may belong to either the Neon or VFP instruction sets.
19643 Individual encoder functions perform additional architecture checks. */
19645 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19646 #undef THUMB_VARIANT
19647 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19649 /* These mnemonics are unique to VFP. */
19650 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
19651 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
19652 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19653 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19654 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19655 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
19656 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
19657 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
19658 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
19659 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
19661 /* Mnemonics shared by Neon and VFP. */
19662 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
19663 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
19664 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
19666 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
19667 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
19669 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
19670 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
19672 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19673 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19674 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19675 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19676 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19677 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19678 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
19679 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
19681 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
19682 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
19683 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
19684 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
19687 /* NOTE: All VMOV encoding is special-cased! */
19688 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
19689 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
19691 #undef THUMB_VARIANT
19692 #define THUMB_VARIANT & fpu_neon_ext_v1
19694 #define ARM_VARIANT & fpu_neon_ext_v1
19696 /* Data processing with three registers of the same length. */
19697 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19698 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
19699 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
19700 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19701 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19702 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19703 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19704 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19705 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19706 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19707 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
19708 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
19709 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
19710 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
19711 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
19712 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
19713 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
19714 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
19715 /* If not immediate, fall back to neon_dyadic_i64_su.
19716 shl_imm should accept I8 I16 I32 I64,
19717 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19718 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
19719 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
19720 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
19721 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
19722 /* Logic ops, types optional & ignored. */
19723 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
19724 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
19725 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
19726 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
19727 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
19728 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
19729 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
19730 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
19731 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
19732 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
19733 /* Bitfield ops, untyped. */
19734 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
19735 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
19736 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
19737 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
19738 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
19739 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
19740 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
19741 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
19742 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
19743 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
19744 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
19745 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
19746 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
19747 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
19748 back to neon_dyadic_if_su. */
19749 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
19750 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
19751 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
19752 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
19753 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
19754 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
19755 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
19756 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
19757 /* Comparison. Type I8 I16 I32 F32. */
19758 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
19759 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
19760 /* As above, D registers only. */
19761 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
19762 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
19763 /* Int and float variants, signedness unimportant. */
19764 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
19765 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
19766 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
19767 /* Add/sub take types I8 I16 I32 I64 F32. */
19768 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
19769 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
19770 /* vtst takes sizes 8, 16, 32. */
19771 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
19772 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
19773 /* VMUL takes I8 I16 I32 F32 P8. */
19774 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
19775 /* VQD{R}MULH takes S16 S32. */
19776 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
19777 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
19778 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
19779 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
19780 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
19781 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
19782 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
19783 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
19784 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
19785 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
19786 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
19787 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
19788 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
19789 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
19790 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
19791 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
19793 /* Two address, int/float. Types S8 S16 S32 F32. */
19794 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
19795 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
19797 /* Data processing with two registers and a shift amount. */
19798 /* Right shifts, and variants with rounding.
19799 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
19800 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
19801 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
19802 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
19803 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
19804 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
19805 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
19806 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
19807 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
19808 /* Shift and insert. Sizes accepted 8 16 32 64. */
19809 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
19810 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
19811 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
19812 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
19813 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
19814 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
19815 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
19816 /* Right shift immediate, saturating & narrowing, with rounding variants.
19817 Types accepted S16 S32 S64 U16 U32 U64. */
19818 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
19819 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
19820 /* As above, unsigned. Types accepted S16 S32 S64. */
19821 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
19822 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
19823 /* Right shift narrowing. Types accepted I16 I32 I64. */
19824 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
19825 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
19826 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
19827 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
19828 /* CVT with optional immediate for fixed-point variant. */
19829 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
19831 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
19832 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
19834 /* Data processing, three registers of different lengths. */
19835 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
19836 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
19837 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
19838 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
19839 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
19840 /* If not scalar, fall back to neon_dyadic_long.
19841 Vector types as above, scalar types S16 S32 U16 U32. */
19842 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
19843 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
19844 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
19845 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
19846 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
19847 /* Dyadic, narrowing insns. Types I16 I32 I64. */
19848 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
19849 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
19850 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
19851 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
19852 /* Saturating doubling multiplies. Types S16 S32. */
19853 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
19854 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
19855 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
19856 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
19857 S16 S32 U16 U32. */
19858 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
19860 /* Extract. Size 8. */
19861 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
19862 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
19864 /* Two registers, miscellaneous. */
19865 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
19866 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
19867 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
19868 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
19869 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
19870 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
19871 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
19872 /* Vector replicate. Sizes 8 16 32. */
19873 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
19874 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
19875 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
19876 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
19877 /* VMOVN. Types I16 I32 I64. */
19878 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
19879 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
19880 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
19881 /* VQMOVUN. Types S16 S32 S64. */
19882 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
19883 /* VZIP / VUZP. Sizes 8 16 32. */
19884 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
19885 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
19886 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
19887 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
19888 /* VQABS / VQNEG. Types S8 S16 S32. */
19889 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
19890 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
19891 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
19892 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
19893 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
19894 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
19895 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
19896 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
19897 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
19898 /* Reciprocal estimates. Types U32 F32. */
19899 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
19900 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
19901 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
19902 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
19903 /* VCLS. Types S8 S16 S32. */
19904 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
19905 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
19906 /* VCLZ. Types I8 I16 I32. */
19907 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
19908 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
19909 /* VCNT. Size 8. */
19910 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
19911 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
19912 /* Two address, untyped. */
19913 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
19914 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
19915 /* VTRN. Sizes 8 16 32. */
19916 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
19917 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
19919 /* Table lookup. Size 8. */
19920 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
19921 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
19923 #undef THUMB_VARIANT
19924 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
19926 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
19928 /* Neon element/structure load/store. */
19929 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
19930 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
19931 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
19932 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
19933 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
19934 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
19935 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
19936 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
19938 #undef THUMB_VARIANT
19939 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
19941 #define ARM_VARIANT & fpu_vfp_ext_v3xd
19942 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
19943 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
19944 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
19945 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
19946 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
19947 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
19948 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
19949 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
19950 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
19952 #undef THUMB_VARIANT
19953 #define THUMB_VARIANT & fpu_vfp_ext_v3
19955 #define ARM_VARIANT & fpu_vfp_ext_v3
19957 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
19958 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
19959 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
19960 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
19961 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
19962 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
19963 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
19964 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
19965 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
19968 #define ARM_VARIANT & fpu_vfp_ext_fma
19969 #undef THUMB_VARIANT
19970 #define THUMB_VARIANT & fpu_vfp_ext_fma
19971 /* Mnemonics shared by Neon and VFP. These are included in the
19972 VFP FMA variant; NEON and VFP FMA always includes the NEON
19973 FMA instructions. */
19974 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
19975 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
19976 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
19977 the v form should always be used. */
19978 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19979 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19980 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19981 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19982 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19983 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19985 #undef THUMB_VARIANT
19987 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
19989 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
19990 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
19991 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
19992 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
19993 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
19994 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
19995 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
19996 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
19999 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20001 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
20002 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
20003 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
20004 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
20005 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
20006 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
20007 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
20008 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
20009 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
20010 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20011 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20012 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20013 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20014 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20015 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20016 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20017 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20018 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20019 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
20020 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
20021 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20022 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20023 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20024 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20025 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20026 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20027 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
20028 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
20029 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
20030 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
20031 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
20032 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
20033 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
20034 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
20035 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20036 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20037 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20038 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20039 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20040 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20041 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20042 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20043 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20044 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20045 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20046 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20047 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
20048 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20049 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20050 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20051 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20052 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20053 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20054 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20055 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20056 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20057 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20058 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20059 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20060 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20061 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20062 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20063 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20064 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20065 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20066 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20067 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20068 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20069 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20070 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20071 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20072 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20073 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20074 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20075 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20076 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20077 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20078 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20079 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20080 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20081 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20082 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20083 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20084 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20085 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20086 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20087 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20088 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20089 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
20090 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20091 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20092 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20093 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20094 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20095 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20096 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20097 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20098 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20099 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20100 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20101 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20102 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20103 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20104 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20105 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20106 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20107 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20108 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20109 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20110 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20111 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
20112 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20113 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20114 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20115 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20116 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20117 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20118 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20119 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20120 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20121 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20122 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20123 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20124 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20125 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20126 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20127 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20128 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20129 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20130 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20131 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20132 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20133 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20134 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20135 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20136 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20137 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20138 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20139 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20140 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20141 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20142 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20143 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20144 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20145 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20146 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20147 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20148 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20149 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20150 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20151 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20152 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20153 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20154 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20155 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20156 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20157 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20158 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20159 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20160 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20161 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20162 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
20165 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20167 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
20168 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
20169 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
20170 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20171 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20172 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20173 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20174 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20175 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20176 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20177 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20178 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20179 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20180 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20181 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20182 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20183 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20184 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20185 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20186 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20187 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
20188 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20189 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20190 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20191 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20192 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20193 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20194 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20195 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20196 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20197 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20198 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20199 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20200 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20201 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20202 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20203 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20204 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20205 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20206 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20207 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20208 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20209 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20210 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20211 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20212 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20213 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20214 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20215 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20216 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20217 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20218 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20219 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20220 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20221 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20222 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20223 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20226 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20228 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20229 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20230 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20231 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20232 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20233 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20234 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20235 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20236 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
20237 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
20238 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
20239 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
20240 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
20241 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
20242 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
20243 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
20244 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
20245 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
20246 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
20247 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
20248 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
20249 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
20250 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
20251 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
20252 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
20253 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
20254 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
20255 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
20256 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
20257 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
20258 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
20259 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
20260 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
20261 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
20262 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
20263 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
20264 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
20265 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
20266 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
20267 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
20268 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
20269 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
20270 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
20271 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
20272 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
20273 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
20274 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
20275 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
20276 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
20277 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
20278 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
20279 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
20280 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
20281 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
20282 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20283 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20284 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20285 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20286 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20287 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20288 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
20289 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
20290 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
20291 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
20292 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20293 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20294 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20295 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20296 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20297 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20298 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20299 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20300 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20301 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20302 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20303 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20306 #undef THUMB_VARIANT
20332 /* MD interface: bits in the object file. */
20334 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20335 for use in the a.out file, and stores them in the array pointed to by buf.
20336 This knows about the endian-ness of the target machine and does
20337 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20338 2 (short) and 4 (long) Floating numbers are put out as a series of
20339 LITTLENUMS (shorts, here at least). */
/* Emit VAL as an N-byte number into BUF in the target's byte order.
   NOTE(review): this chunk is a garbled extraction — the return type,
   braces and the `else` between the two calls were elided; comments
   describe only the visible code.  */
20342 md_number_to_chars (char * buf
, valueT val
, int n
)
/* Big-endian targets take the big-endian encoder ...  */
20344 if (target_big_endian
)
20345 number_to_chars_bigendian (buf
, val
, n
);
/* ... otherwise the little-endian one (the `else` line is elided here).  */
20347 number_to_chars_littleendian (buf
, val
, n
);
/* Read an N-byte number out of BUF, honouring target endianness.
   NOTE(review): declarations, the loop headers and the return statement
   were elided by the extraction; only the accumulation steps are visible.  */
20351 md_chars_to_number (char * buf
, int n
)
/* Work on the bytes as unsigned so sign extension cannot corrupt them.  */
20354 unsigned char * where
= (unsigned char *) buf
;
20356 if (target_big_endian
)
/* Big-endian: fold in bytes front to back.  */
20361 result
|= (*where
++ & 255);
/* Little-endian path (loop header elided): fold in bytes back to front.  */
20369 result
|= (where
[n
] & 255);
20376 /* MD interface: Sections. */
20378 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20379 that an rs_machine_dependent frag may reach. */
/* Upper bound on the variable part of a machine-dependent frag.
   NOTE(review): the return statement is elided in this extraction;
   only the sanity assertion is visible.  */
20382 arm_frag_max_var (fragS
*fragp
)
20384 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20385 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20387 Note that we generate relaxable instructions even for cases that don't
20388 really need it, like an immediate that's a trivial constant. So we're
20389 overestimating the instruction size for some of those cases. Rather
20390 than putting more intelligence here, it would probably be better to
20391 avoid generating a relaxation frag in the first place when it can be
20392 determined up front that a short instruction will suffice. */
/* Only relaxable (Thumb) frags may reach this hook.  */
20394 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
20398 /* Estimate the size of a frag before relaxing. Assume everything fits in
/* GAS hook: initial size estimate for FRAGP before relaxation.
   NOTE(review): the body (and its return) was elided by the extraction;
   only the signature survives.  */
20402 md_estimate_size_before_relax (fragS
* fragp
,
20403 segT segtype ATTRIBUTE_UNUSED
)
20409 /* Convert a machine dependent frag. */
/* Finalise a relaxed machine-dependent frag: rewrite the (possibly widened)
   Thumb instruction in place and attach the appropriate relocation.
   NOTE(review): this extraction has dropped many structural lines (switch
   header, case labels, braces, `else`, declarations); the comments below
   annotate only the visible statements.  */
20412 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
20414 unsigned long insn
;
20415 unsigned long old_op
;
/* Point at the instruction bytes at the end of the fixed part.  */
20423 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
/* Fetch the original 16-bit opcode that was emitted before relaxing.  */
20425 old_op
= bfd_get_16(abfd
, buf
);
/* Build the fixup expression from the frag's symbol/offset.  */
20426 if (fragp
->fr_symbol
)
20428 exp
.X_op
= O_symbol
;
20429 exp
.X_add_symbol
= fragp
->fr_symbol
;
20433 exp
.X_op
= O_constant
;
20435 exp
.X_add_number
= fragp
->fr_offset
;
20436 opcode
= fragp
->fr_subtype
;
/* Load/store group: ldr pc/sp-relative and str sp-relative.  */
20439 case T_MNEM_ldr_pc
:
20440 case T_MNEM_ldr_pc2
:
20441 case T_MNEM_ldr_sp
:
20442 case T_MNEM_str_sp
:
/* fr_var == 4 means relaxation chose the 32-bit encoding.  */
20449 if (fragp
->fr_var
== 4)
20451 insn
= THUMB_OP32 (opcode
);
20452 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
/* sp/pc-relative form: move the Rd field into the 32-bit layout.  */
20454 insn
|= (old_op
& 0x700) << 4;
/* Register form: re-place Rd and Rn fields for the wide encoding.  */
20458 insn
|= (old_op
& 7) << 12;
20459 insn
|= (old_op
& 0x38) << 13;
20461 insn
|= 0x00000c00;
20462 put_thumb32_insn (buf
, insn
);
20463 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
/* Narrow encoding keeps the 16-bit offset relocation.  */
20467 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
/* Only ldr_pc2 is a genuinely pc-relative fixup.  */
20469 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
/* adr-style group (case label elided).  */
20472 if (fragp
->fr_var
== 4)
20474 insn
= THUMB_OP32 (opcode
);
20475 insn
|= (old_op
& 0xf0) << 4;
20476 put_thumb32_insn (buf
, insn
);
20477 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
20481 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
/* Narrow adr is encoded relative to a base 4 bytes later.  */
20482 exp
.X_add_number
-= 4;
/* mov/movs immediate group (case labels elided).  */
20490 if (fragp
->fr_var
== 4)
20492 int r0off
= (opcode
== T_MNEM_mov
20493 || opcode
== T_MNEM_movs
) ? 0 : 8;
20494 insn
= THUMB_OP32 (opcode
);
20495 insn
= (insn
& 0xe1ffffff) | 0x10000000;
20496 insn
|= (old_op
& 0x700) << r0off
;
20497 put_thumb32_insn (buf
, insn
);
20498 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
20502 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
/* Unconditional branch (case label elided): widen b to the 25-bit form.  */
20507 if (fragp
->fr_var
== 4)
20509 insn
= THUMB_OP32(opcode
);
20510 put_thumb32_insn (buf
, insn
);
20511 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
20514 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
/* Conditional branch (case label elided): carry the condition field over.  */
20518 if (fragp
->fr_var
== 4)
20520 insn
= THUMB_OP32(opcode
);
20521 insn
|= (old_op
& 0xf00) << 14;
20522 put_thumb32_insn (buf
, insn
);
20523 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
20526 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
/* add/sub against sp or pc.  */
20529 case T_MNEM_add_sp
:
20530 case T_MNEM_add_pc
:
20531 case T_MNEM_inc_sp
:
20532 case T_MNEM_dec_sp
:
20533 if (fragp
->fr_var
== 4)
20535 /* ??? Choose between add and addw. */
20536 insn
= THUMB_OP32 (opcode
);
20537 insn
|= (old_op
& 0xf0) << 4;
20538 put_thumb32_insn (buf
, insn
);
20539 if (opcode
== T_MNEM_add_pc
)
20540 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
20542 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
20545 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
/* Generic add/sub immediate (case labels elided).  */
20553 if (fragp
->fr_var
== 4)
20555 insn
= THUMB_OP32 (opcode
);
20556 insn
|= (old_op
& 0xf0) << 4;
20557 insn
|= (old_op
& 0xf) << 16;
20558 put_thumb32_insn (buf
, insn
);
/* Bit 20 distinguishes the flag-setting form, which needs ADD_IMM.  */
20559 if (insn
& (1 << 20))
20560 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
20562 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
20565 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
/* Create the fixup for the chosen relocation and credit it to the
   original source location.  */
20571 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
20572 (enum bfd_reloc_code_real
) reloc_type
);
20573 fixp
->fx_file
= fragp
->fr_file
;
20574 fixp
->fx_line
= fragp
->fr_line
;
/* The variable part is now fixed: absorb it.  */
20575 fragp
->fr_fix
+= fragp
->fr_var
;
20577 /* Set whether we use thumb-2 ISA based on final relaxation results. */
20578 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
&& !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
20580 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
20583 /* Return the size of a relaxable immediate operand instruction.
20584 SHIFT and SIZE specify the form of the allowable immediate. */
/* Decide the size (2 or 4 bytes — return statements elided in this
   extraction) of a relaxable instruction whose immediate has SIZE bits
   shifted left by SHIFT.  */
20586 relax_immediate (fragS
*fragp
, int size
, int shift
)
20592 /* ??? Should be able to do better than this. */
/* Symbolic operands can't be resolved yet: assume the wide form.  */
20593 if (fragp
->fr_symbol
)
/* low = the bits below the encodable field; mask = the encodable field.  */
20596 low
= (1 << shift
) - 1;
20597 mask
= (1 << (shift
+ size
)) - (1 << shift
);
20598 offset
= fragp
->fr_offset
;
20599 /* Force misaligned offsets to 32-bit variant. */
/* Anything outside the narrow field needs the 32-bit encoding.  */
20602 if (offset
& ~mask
)
20607 /* Get the address of a symbol during relaxation. */
/* Compute the address of FRAGP's symbol during relaxation, compensating
   for frags that have not yet been stretched on this pass.
   NOTE(review): several lines (declarations, the condition at 20627's
   start, and the return) are elided in this extraction.  */
20609 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
20615 sym
= fragp
->fr_symbol
;
20616 sym_frag
= symbol_get_frag (sym
);
/* Absolute symbols must live in the zero-address frag.  */
20617 know (S_GET_SEGMENT (sym
) != absolute_section
|| sym_frag
== &zero_address_frag
);
20619 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
20621 /* If frag has yet to be reached on this pass, assume it will
20622 move by STRETCH just as we did. If this is not so, it will
20623 be because some frag between grows, and that will force
/* A differing relax_marker means SYM_FRAG is later in this pass.  */
20627 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
20631 /* Adjust stretch for any alignment frag. Note that if we have
20632 been expanding the earlier code, the symbol may be
20633 defined in what appears to be an earlier frag. FIXME:
20634 This doesn't handle the fr_subtype field, which specifies
20635 a maximum number of bytes to skip when doing an
/* Walk forward to the symbol's frag, absorbing alignment rounding.  */
20637 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
20639 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
/* Negative stretch: round its magnitude down to the alignment.  */
20642 stretch
= - ((- stretch
)
& ~ ((1 << (int) f
->fr_offset
) - 1));
/* Positive stretch: round down to the alignment boundary.  */
20645 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
20657 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
/* Size a relaxable adr pseudo-instruction / pc-relative ldr against the
   resolved target address.  NOTE(review): return statements are elided
   by this extraction.  */
20660 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
20665 /* Assume worst case for symbols not known to be in the same section. */
20666 if (fragp
->fr_symbol
== NULL
|| !S_IS_DEFINED (fragp
->fr_symbol
)
|| sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
|| S_IS_WEAK (fragp
->fr_symbol
))
20672 val
= relaxed_symbol_addr (fragp
, stretch
);
20673 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
/* pc is read as the instruction address + 4, rounded down to a word.  */
20674 addr
= (addr
+ 4) & ~3;
20675 /* Force misaligned targets to 32-bit variant. */
/* Narrow adr reaches only 0..1020 bytes forward.  */
20679 if (val
< 0 || val
> 1020)
20684 /* Return the size of a relaxable add/sub immediate instruction. */
/* Size a relaxable add/sub immediate: the 8-bit form is only available
   when source and destination registers coincide.  */
20686 relax_addsub (fragS
*fragp
, asection
*sec
)
20691 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
/* Re-read the narrow opcode to inspect its register fields.  */
20692 op
= bfd_get_16(sec
->owner
, buf
);
/* Rd == Rn: 8-bit immediate form; otherwise only 3 bits are available.  */
20693 if ((op
& 0xf) == ((op
>> 4) & 0xf))
20694 return relax_immediate (fragp
, 8, 0);
20696 return relax_immediate (fragp
, 3, 0);
20699 /* Return TRUE iff the definition of symbol S could be pre-empted
20700 (overridden) at link or load time. */
/* TRUE iff symbol S could be pre-empted (overridden) at link or load
   time.  NOTE(review): the return statements and the S_IS_WEAK test
   body are elided in this extraction.  */
20702 symbol_preemptible (symbolS
*s
)
20704 /* Weak symbols can always be pre-empted. */
20708 /* Non-global symbols cannot be pre-empted. */
20709 if (! S_IS_EXTERNAL (s
))
20713 /* In ELF, a global symbol can be marked protected, or private. In that
20714 case it can't be pre-empted (other definitions in the same link unit
20715 would violate the ODR). */
20716 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
20720 /* Other global symbols might be pre-empted. */
20724 /* Return the size of a relaxable branch instruction. BITS is the
20725 size of the offset field in the narrow instruction. */
/* Size a relaxable branch whose narrow form has a BITS-bit offset field.
   NOTE(review): return statements and the `limit` computation are elided
   in this extraction.  */
20728 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
20734 /* Assume worst case for symbols not known to be in the same section. */
20735 if (!S_IS_DEFINED (fragp
->fr_symbol
)
|| sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
|| S_IS_WEAK (fragp
->fr_symbol
))
20741 /* A branch to a function in ARM state will require interworking. */
20742 if (S_IS_DEFINED (fragp
->fr_symbol
)
&& ARM_IS_FUNC (fragp
->fr_symbol
))
/* Pre-emptible targets may be rebound by the linker: assume wide.  */
20747 if (symbol_preemptible (fragp
->fr_symbol
))
20750 val
= relaxed_symbol_addr (fragp
, stretch
);
/* Branches are pc-relative to the instruction address + 4.  */
20751 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
20754 /* Offset is a signed value *2 */
/* Out of narrow range in either direction forces the 32-bit form.  */
20756 if (val
>= limit
|| val
< -limit
)
20762 /* Relax a machine dependent frag. This returns the amount by which
20763 the current size of the frag should change. */
/* Relax one machine-dependent frag; returns the change in its size.
   Dispatches on fr_subtype (the Thumb mnemonic) to the per-form sizing
   helpers.  NOTE(review): several case labels and break statements were
   elided by this extraction.  */
20766 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
20771 oldsize
= fragp
->fr_var
;
20772 switch (fragp
->fr_subtype
)
20774 case T_MNEM_ldr_pc2
:
20775 newsize
= relax_adr (fragp
, sec
, stretch
);
20777 case T_MNEM_ldr_pc
:
20778 case T_MNEM_ldr_sp
:
20779 case T_MNEM_str_sp
:
20780 newsize
= relax_immediate (fragp
, 8, 2);
/* Word / halfword / byte load-store forms (case labels elided).  */
20784 newsize
= relax_immediate (fragp
, 5, 2);
20788 newsize
= relax_immediate (fragp
, 5, 1);
20792 newsize
= relax_immediate (fragp
, 5, 0);
/* adr (case label elided).  */
20795 newsize
= relax_adr (fragp
, sec
, stretch
);
/* mov/cmp immediate (case labels elided).  */
20801 newsize
= relax_immediate (fragp
, 8, 0);
/* Unconditional / conditional branches (case labels elided).  */
20804 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
20807 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
20809 case T_MNEM_add_sp
:
20810 case T_MNEM_add_pc
:
20811 newsize
= relax_immediate (fragp
, 8, 2);
20813 case T_MNEM_inc_sp
:
20814 case T_MNEM_dec_sp
:
20815 newsize
= relax_immediate (fragp
, 7, 2);
/* Generic add/sub (case labels elided).  */
20821 newsize
= relax_addsub (fragp
, sec
);
20827 fragp
->fr_var
= newsize
;
20828 /* Freeze wide instructions that are at or before the same location as
20829 in the previous pass. This avoids infinite loops.
20830 Don't freeze them unconditionally because targets may be artificially
20831 misaligned by the expansion of preceding frags. */
20832 if (stretch
<= 0 && newsize
> 2)
20834 md_convert_frag (sec
->owner
, sec
, fragp
);
20838 return newsize
- oldsize
;
20841 /* Round up a section size to the appropriate boundary. */
/* Round up a section size to its alignment boundary (a.out targets only;
   elsewhere the size passes through).  NOTE(review): the second parameter
   and the return statement are elided in this extraction.  */
20844 md_section_align (segT segment ATTRIBUTE_UNUSED
,
20847 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
20848 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
20850 /* For a.out, force the section size to be aligned. If we don't do
20851 this, BFD will align it for us, but it will not write out the
20852 final bytes of the section. This may be a bug in BFD, but it is
20853 easier to fix it here since that is how the other a.out targets
20857 align
= bfd_get_section_alignment (stdoutput
, segment
);
/* Round SIZE up to a multiple of 1 << align.  */
20858 size
= ((size
+ (1 << align
) - 1) & ((valueT
) -1 << align
));
20865 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
20866 of an rs_align_code fragment. */
/* Fill an rs_align_code frag with no-op instructions appropriate to the
   current mode (ARM vs Thumb, v6T2 wide Thumb noops when available) and
   the target's endianness.  Called from HANDLE_ALIGN in write.c.
   NOTE(review): braces, several declarations, returns and a few
   statements are elided in this extraction.  */
20869 arm_handle_align (fragS
* fragP
)
/* ARM no-ops, indexed [has-v6k-style-nop][big-endian]: mov r0,r0 and
   the architected nop encoding.  */
20871 static char const arm_noop
[2][2][4] =
20874 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
20875 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
20878 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
20879 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
/* Narrow Thumb no-ops: mov r8,r8 and the architected Thumb nop.  */
20882 static char const thumb_noop
[2][2][2] =
20885 {0xc0, 0x46}, /* LE */
20886 {0x46, 0xc0}, /* BE */
20889 {0x00, 0xbf}, /* LE */
20890 {0xbf, 0x00} /* BE */
20893 static char const wide_thumb_noop
[2][4] =
20894 { /* Wide Thumb-2 */
20895 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
20896 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
20899 unsigned bytes
, fix
, noop_size
;
20902 const char *narrow_noop
= NULL
;
/* Only code-alignment frags are handled here.  */
20907 if (fragP
->fr_type
!= rs_align_code
)
/* Number of padding bytes this frag must supply.  */
20910 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
20911 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
20914 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
20915 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
/* arm_init_frag must have recorded the ARM/Thumb mode by now.  */
20917 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
20919 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
/* Thumb: with v6T2 use wide noops (plus a narrow one for odd counts).  */
20921 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
20924 narrow_noop
= thumb_noop
[1][target_big_endian
];
20925 noop
= wide_thumb_noop
[target_big_endian
];
20928 noop
= thumb_noop
[0][target_big_endian
];
/* ARM mode: pick the noop flavour by CPU feature and endianness.  */
20936 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
? selected_cpu
: arm_arch_none
,
[target_big_endian
];
20946 fragP
->fr_var
= noop_size
;
/* Leading bytes that can't hold a whole noop are zero-filled and
   covered by a data mapping symbol.  */
20948 if (bytes
& (noop_size
- 1))
20950 fix
= bytes
& (noop_size
- 1);
20952 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
20954 memset (p
, 0, fix
);
/* One narrow noop if an odd (half-wide) count remains.  */
20961 if (bytes
& noop_size
)
20963 /* Insert a narrow noop. */
20964 memcpy (p
, narrow_noop
, noop_size
);
20966 bytes
-= noop_size
;
20970 /* Use wide noops for the remainder */
20974 while (bytes
>= noop_size
)
20976 memcpy (p
, noop
, noop_size
);
20978 bytes
-= noop_size
;
20982 fragP
->fr_fix
+= fix
;
20985 /* Called from md_do_align. Used to create an alignment
20986 frag in a code section. */
/* Create a code-alignment frag (called from md_do_align); arm_handle_align
   fills it with noops later.  NOTE(review): the error-message buffer setup
   and the trailing frag_var arguments are elided in this extraction.  */
20989 arm_frag_align_code (int n
, int max
)
20993 /* We assume that there will never be a requirement
20994 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
20995 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21000 _("alignments greater than %d bytes not supported in .text sections."),
21001 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
21002 as_fatal ("%s", err_msg
);
21005 p
= frag_var (rs_align_code
,
21006 MAX_MEM_FOR_RS_ALIGN_CODE
,
21008 (relax_substateT
) max
,
21015 /* Perform target specific initialisation of a frag.
21016 Note - despite the name this initialisation is not done when the frag
21017 is created, but only when its type is assigned. A frag can be created
21018 and used a long time before its type is set, so beware of assuming that
21019 this initialisation is performed first. */
/* Non-ELF variant: tag the frag with the current ARM/Thumb mode so
   arm_handle_align can pick the right noop encoding later.  */
21023 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
21025 /* Record whether this frag is in an ARM or a THUMB area. */
21026 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21029 #else /* OBJ_ELF is defined. */
/* ELF variant: record the ARM/Thumb mode (once) and emit a mapping
   symbol for alignment frags.  NOTE(review): the switch braces and the
   default/break lines are elided in this extraction.  */
21031 arm_init_frag (fragS
* fragP
, int max_chars
)
21033 /* If the current ARM vs THUMB mode has not already
21034 been recorded into this frag then do so now. */
21035 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
21036 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21038 /* Record a mapping symbol for alignment frags. We will delete this
21039 later if the alignment ends up empty. */
21040 switch (fragP
->fr_type
)
/* Data alignment: $d mapping symbol.  */
21043 case rs_align_test
:
21045 mapping_state_2 (MAP_DATA
, max_chars
);
/* Code alignment: $t or $a depending on the current mode.  */
21047 case rs_align_code
:
21048 mapping_state_2 (thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
21055 /* When we change sections we need to issue a new mapping symbol. */
/* Section-change hook: give a freshly created .ARM.exidx section its
   SHF_LINK_ORDER target if it does not have one yet.  */
21058 arm_elf_change_section (void)
21060 /* Link an unlinked unwind index table section to the .text section. */
21061 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
&& elf_linked_to_section (now_seg
) == NULL
)
21063 elf_linked_to_section (now_seg
) = text_section
;
/* Map the section-type name "exidx" to SHT_ARM_EXIDX.
   NOTE(review): the fallthrough return for other names is elided in
   this extraction.  */
21067 arm_elf_section_type (const char * str
, size_t len
)
21069 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
21070 return SHT_ARM_EXIDX
;
21075 /* Code to deal with unwinding tables. */
21077 static void add_unwind_adjustsp (offsetT
);
21079 /* Generate any deferred unwind frame offset. */
/* Emit the deferred stack adjustment (if any) as unwind opcodes and
   clear the pending amount.  NOTE(review): the local declaration and a
   guard line are elided in this extraction.  */
21082 flush_pending_unwind (void)
21086 offset
= unwind
.pending_offset
;
/* Clear first so add_unwind_adjustsp's opcodes don't re-trigger us.  */
21087 unwind
.pending_offset
= 0;
21089 add_unwind_adjustsp (offset
);
21092 /* Add an opcode to this list for this function. Two-byte opcodes should
21093 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
/* Append a LENGTH-byte unwind opcode OP to the current function's opcode
   list (built in reverse order; two-byte ops are passed as
   op[0] << 8 | op[1]).  Grows the buffer in ARM_OPCODE_CHUNK_SIZE steps.
   NOTE(review): braces and the byte-store loop header are elided in this
   extraction.  */
21097 add_unwind_opcode (valueT op
, int length
)
21099 /* Add any deferred stack adjustment. */
21100 if (unwind
.pending_offset
)
21101 flush_pending_unwind ();
/* Any new opcode invalidates a previously recorded sp restore.  */
21103 unwind
.sp_restored
= 0;
/* Grow the opcode buffer if this opcode won't fit.  */
21105 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
21107 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
21108 if (unwind
.opcodes
)
21109 unwind
.opcodes
= (unsigned char *) xrealloc (unwind
.opcodes
,
21110 unwind
.opcode_alloc
);
21112 unwind
.opcodes
= (unsigned char *) xmalloc (unwind
.opcode_alloc
);
/* Store the opcode bytes, low byte first (list is reversed).  */
21117 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
21119 unwind
.opcode_count
++;
21123 /* Add unwind opcodes to adjust the stack pointer. */
/* Emit unwind opcodes that adjust sp by OFFSET bytes, choosing among the
   EHABI encodings: 0xb2+uleb128 (large), two short opcodes, one short
   opcode, or the 0x40-based negative form.  NOTE(review): braces, loop
   headers and several statements are elided in this extraction.  */
21126 add_unwind_adjustsp (offsetT offset
)
21130 if (offset
> 0x200)
21132 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
21137 /* Long form: 0xb2, uleb128. */
21138 /* This might not fit in a word so add the individual bytes,
21139 remembering the list is built in reverse order. */
21140 o
= (valueT
) ((offset
- 0x204) >> 2);
/* Terminating zero byte of the (reversed) uleb128.  */
21142 add_unwind_opcode (0, 1);
21144 /* Calculate the uleb128 encoding of the offset. */
21148 bytes
[n
] = o
& 0x7f;
21154 /* Add the insn. */
21156 add_unwind_opcode (bytes
[n
- 1], 1);
21157 add_unwind_opcode (0xb2, 1);
21159 else if (offset
> 0x100)
21161 /* Two short opcodes. */
21162 add_unwind_opcode (0x3f, 1);
21163 op
= (offset
- 0x104) >> 2;
21164 add_unwind_opcode (op
, 1);
21166 else if (offset
> 0)
21168 /* Short opcode. */
21169 op
= (offset
- 4) >> 2;
21170 add_unwind_opcode (op
, 1);
/* Negative adjustment: emit 0x7f chunks then a 0x40-based remainder.  */
21172 else if (offset
< 0)
21175 while (offset
> 0x100)
21177 add_unwind_opcode (0x7f, 1);
21180 op
= ((offset
- 4) >> 2) | 0x40;
21181 add_unwind_opcode (op
, 1);
21185 /* Finish the list of unwind opcodes for this function. */
/* Close the unwind opcode list for the current function: account for a
   frame pointer (emit the "set sp from FP register" opcode) and flush
   any remaining deferred sp adjustment.  NOTE(review): braces and the
   else branch structure are elided in this extraction.  */
21187 finish_unwind_opcodes (void)
21191 if (unwind
.fp_used
)
21193 /* Adjust sp as necessary. */
21194 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
21195 flush_pending_unwind ();
21197 /* After restoring sp from the frame pointer. */
/* 0x90 | reg = EHABI "set vsp = r[reg]" opcode.  */
21198 op
= 0x90 | unwind
.fp_reg
;
21199 add_unwind_opcode (op
, 1);
21202 flush_pending_unwind ();
21206 /* Start an exception table entry. If idx is nonzero this is an index table
/* Switch to (creating if needed) the unwind section paired with TEXT_SEG:
   .ARM.exidx* when IDX is nonzero, .ARM.extab* otherwise, handling
   .gnu.linkonce and COMDAT-group text sections.  NOTE(review): several
   declarations, braces, and flag setup lines are elided in this
   extraction.  */
21210 start_unwind_section (const segT text_seg
, int idx
)
21212 const char * text_name
;
21213 const char * prefix
;
21214 const char * prefix_once
;
21215 const char * group_name
;
21219 size_t sec_name_len
;
/* Index table (idx != 0): .ARM.exidx, section type SHT_ARM_EXIDX.  */
21226 prefix
= ELF_STRING_ARM_unwind
;
21227 prefix_once
= ELF_STRING_ARM_unwind_once
;
21228 type
= SHT_ARM_EXIDX
;
/* Unwind data table: .ARM.extab, plain SHT_PROGBITS.  */
21232 prefix
= ELF_STRING_ARM_unwind_info
;
21233 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
21234 type
= SHT_PROGBITS
;
21237 text_name
= segment_name (text_seg
);
21238 if (streq (text_name
, ".text"))
/* Linkonce text: use the once-prefix and strip the linkonce stem.  */
21241 if (strncmp (text_name
, ".gnu.linkonce.t.",
21242 strlen (".gnu.linkonce.t.")) == 0)
21244 prefix
= prefix_once
;
21245 text_name
+= strlen (".gnu.linkonce.t.");
/* Build "<prefix><text-section-name>" as the unwind section name.  */
21248 prefix_len
= strlen (prefix
);
21249 text_len
= strlen (text_name
);
21250 sec_name_len
= prefix_len
+ text_len
;
21251 sec_name
= (char *) xmalloc (sec_name_len
+ 1);
21252 memcpy (sec_name
, prefix
, prefix_len
);
21253 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
21254 sec_name
[prefix_len
+ text_len
] = '\0';
21260 /* Handle COMDAT group. */
21261 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
21263 group_name
= elf_group_name (text_seg
);
21264 if (group_name
== NULL
)
21266 as_bad (_("Group section `%s' has no group signature"),
21267 segment_name (text_seg
));
21268 ignore_rest_of_line ();
21271 flags
|= SHF_GROUP
;
21275 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
21277 /* Set the section link for index tables. */
21279 elf_linked_to_section (now_seg
) = text_seg
;
21283 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21284 personality routine data. Returns zero, or the index table value for
21285 an inline entry. */
21288 create_unwind_entry (int have_data
)
21293 /* The current word of data. */
21295 /* The number of bytes left in this word. */
21298 finish_unwind_opcodes ();
21300 /* Remember the current text section. */
21301 unwind
.saved_seg
= now_seg
;
21302 unwind
.saved_subseg
= now_subseg
;
21304 start_unwind_section (now_seg
, 0);
21306 if (unwind
.personality_routine
== NULL
)
21308 if (unwind
.personality_index
== -2)
21311 as_bad (_("handlerdata in cantunwind frame"));
21312 return 1; /* EXIDX_CANTUNWIND. */
21315 /* Use a default personality routine if none is specified. */
21316 if (unwind
.personality_index
== -1)
21318 if (unwind
.opcode_count
> 3)
21319 unwind
.personality_index
= 1;
21321 unwind
.personality_index
= 0;
21324 /* Space for the personality routine entry. */
21325 if (unwind
.personality_index
== 0)
21327 if (unwind
.opcode_count
> 3)
21328 as_bad (_("too many unwind opcodes for personality routine 0"));
21332 /* All the data is inline in the index table. */
21335 while (unwind
.opcode_count
> 0)
21337 unwind
.opcode_count
--;
21338 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21342 /* Pad with "finish" opcodes. */
21344 data
= (data
<< 8) | 0xb0;
21351 /* We get two opcodes "free" in the first word. */
21352 size
= unwind
.opcode_count
- 2;
21356 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
21357 if (unwind
.personality_index
!= -1)
21359 as_bad (_("attempt to recreate an unwind entry"));
21363 /* An extra byte is required for the opcode count. */
21364 size
= unwind
.opcode_count
+ 1;
21367 size
= (size
+ 3) >> 2;
21369 as_bad (_("too many unwind opcodes"));
21371 frag_align (2, 0, 0);
21372 record_alignment (now_seg
, 2);
21373 unwind
.table_entry
= expr_build_dot ();
21375 /* Allocate the table entry. */
21376 ptr
= frag_more ((size
<< 2) + 4);
21377 /* PR 13449: Zero the table entries in case some of them are not used. */
21378 memset (ptr
, 0, (size
<< 2) + 4);
21379 where
= frag_now_fix () - ((size
<< 2) + 4);
21381 switch (unwind
.personality_index
)
21384 /* ??? Should this be a PLT generating relocation? */
21385 /* Custom personality routine. */
21386 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
21387 BFD_RELOC_ARM_PREL31
);
21392 /* Set the first byte to the number of additional words. */
21393 data
= size
> 0 ? size
- 1 : 0;
21397 /* ABI defined personality routines. */
21399 /* Three opcodes bytes are packed into the first word. */
21406 /* The size and first two opcode bytes go in the first word. */
21407 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
21412 /* Should never happen. */
21416 /* Pack the opcodes into words (MSB first), reversing the list at the same
21418 while (unwind
.opcode_count
> 0)
21422 md_number_to_chars (ptr
, data
, 4);
21427 unwind
.opcode_count
--;
21429 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21432 /* Finish off the last word. */
21435 /* Pad with "finish" opcodes. */
21437 data
= (data
<< 8) | 0xb0;
21439 md_number_to_chars (ptr
, data
, 4);
21444 /* Add an empty descriptor if there is no user-specified data. */
21445 ptr
= frag_more (4);
21446 md_number_to_chars (ptr
, 0, 4);
21453 /* Initialize the DWARF-2 unwind information for this procedure. */
21456 tc_arm_frame_initial_instructions (void)
21458 cfi_add_CFA_def_cfa (REG_SP
, 0);
21460 #endif /* OBJ_ELF */
21462 /* Convert REGNAME to a DWARF-2 register number. */
21465 tc_arm_regname_to_dw2regnum (char *regname
)
21467 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
21471 /* PR 16694: Allow VFP registers as well. */
21472 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
21476 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
21485 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
21489 exp
.X_op
= O_secrel
;
21490 exp
.X_add_symbol
= symbol
;
21491 exp
.X_add_number
= 0;
21492 emit_expr (&exp
, size
);
21496 /* MD interface: Symbol and relocation handling. */
21498 /* Return the address within the segment that a PC-relative fixup is
21499 relative to. For ARM, PC-relative fixups applied to instructions
21500 are generally relative to the location of the fixup plus 8 bytes.
21501 Thumb branches are offset by 4, and Thumb loads relative to PC
21502 require special handling. */
21505 md_pcrel_from_section (fixS
* fixP
, segT seg
)
21507 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21509 /* If this is pc-relative and we are going to emit a relocation
21510 then we just want to put out any pipeline compensation that the linker
21511 will need. Otherwise we want to use the calculated base.
21512 For WinCE we skip the bias for externals as well, since this
21513 is how the MS ARM-CE assembler behaves and we want to be compatible. */
21515 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
21516 || (arm_force_relocation (fixP
)
21518 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
21524 switch (fixP
->fx_r_type
)
21526 /* PC relative addressing on the Thumb is slightly odd as the
21527 bottom two bits of the PC are forced to zero for the
21528 calculation. This happens *after* application of the
21529 pipeline offset. However, Thumb adrl already adjusts for
21530 this, so we need not do it again. */
21531 case BFD_RELOC_ARM_THUMB_ADD
:
21534 case BFD_RELOC_ARM_THUMB_OFFSET
:
21535 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
21536 case BFD_RELOC_ARM_T32_ADD_PC12
:
21537 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
21538 return (base
+ 4) & ~3;
21540 /* Thumb branches are simply offset by +4. */
21541 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
21542 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
21543 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
21544 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
21545 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
21548 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
21550 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21551 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21552 && ARM_IS_FUNC (fixP
->fx_addsy
)
21553 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21554 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21557 /* BLX is like branches above, but forces the low two bits of PC to
21559 case BFD_RELOC_THUMB_PCREL_BLX
:
21561 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21562 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21563 && THUMB_IS_FUNC (fixP
->fx_addsy
)
21564 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21565 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21566 return (base
+ 4) & ~3;
21568 /* ARM mode branches are offset by +8. However, the Windows CE
21569 loader expects the relocation not to take this into account. */
21570 case BFD_RELOC_ARM_PCREL_BLX
:
21572 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21573 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21574 && ARM_IS_FUNC (fixP
->fx_addsy
)
21575 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21576 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21579 case BFD_RELOC_ARM_PCREL_CALL
:
21581 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21582 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21583 && THUMB_IS_FUNC (fixP
->fx_addsy
)
21584 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21585 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21588 case BFD_RELOC_ARM_PCREL_BRANCH
:
21589 case BFD_RELOC_ARM_PCREL_JUMP
:
21590 case BFD_RELOC_ARM_PLT32
:
21592 /* When handling fixups immediately, because we have already
21593 discovered the value of a symbol, or the address of the frag involved
21594 we must account for the offset by +8, as the OS loader will never see the reloc.
21595 see fixup_segment() in write.c
21596 The S_IS_EXTERNAL test handles the case of global symbols.
21597 Those need the calculated base, not just the pipe compensation the linker will need. */
21599 && fixP
->fx_addsy
!= NULL
21600 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21601 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
21609 /* ARM mode loads relative to PC are also offset by +8. Unlike
21610 branches, the Windows CE loader *does* expect the relocation
21611 to take this into account. */
21612 case BFD_RELOC_ARM_OFFSET_IMM
:
21613 case BFD_RELOC_ARM_OFFSET_IMM8
:
21614 case BFD_RELOC_ARM_HWLITERAL
:
21615 case BFD_RELOC_ARM_LITERAL
:
21616 case BFD_RELOC_ARM_CP_OFF_IMM
:
21620 /* Other PC-relative relocations are un-offset. */
21626 static bfd_boolean flag_warn_syms
= TRUE
;
21629 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
21631 /* PR 18347 - Warn if the user attempts to create a symbol with the same
21632 name as an ARM instruction. Whilst strictly speaking it is allowed, it
21633 does mean that the resulting code might be very confusing to the reader.
21634 Also this warning can be triggered if the user omits an operand before
21635 an immediate address, eg:
21639 GAS treats this as an assignment of the value of the symbol foo to a
21640 symbol LDR, and so (without this code) it will not issue any kind of
21641 warning or error message.
21643 Note - ARM instructions are case-insensitive but the strings in the hash
21644 table are all stored in lower case, so we must first ensure that name is
21646 if (flag_warn_syms
&& arm_ops_hsh
)
21648 char * nbuf
= strdup (name
);
21651 for (p
= nbuf
; *p
; p
++)
21653 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
21655 static struct hash_control
* already_warned
= NULL
;
21657 if (already_warned
== NULL
)
21658 already_warned
= hash_new ();
21659 /* Only warn about the symbol once. To keep the code
21660 simple we let hash_insert do the lookup for us. */
21661 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
21662 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
21671 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21672 Otherwise we have no need to default values of symbols. */
21675 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
21678 if (name
[0] == '_' && name
[1] == 'G'
21679 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
21683 if (symbol_find (name
))
21684 as_bad (_("GOT already in the symbol table"));
21686 GOT_symbol
= symbol_new (name
, undefined_section
,
21687 (valueT
) 0, & zero_address_frag
);
21697 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21698 computed as two separate immediate values, added together. We
21699 already know that this value cannot be computed by just one ARM
21702 static unsigned int
21703 validate_immediate_twopart (unsigned int val
,
21704 unsigned int * highpart
)
21709 for (i
= 0; i
< 32; i
+= 2)
21710 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
21716 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
21718 else if (a
& 0xff0000)
21720 if (a
& 0xff000000)
21722 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
21726 gas_assert (a
& 0xff000000);
21727 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
21730 return (a
& 0xff) | (i
<< 7);
21737 validate_offset_imm (unsigned int val
, int hwse
)
21739 if ((hwse
&& val
> 255) || val
> 4095)
21744 /* Subroutine of md_apply_fix. Do those data_ops which can take a
21745 negative immediate constant by altering the instruction. A bit of
21750 by inverting the second operand, and
21753 by negating the second operand. */
21756 negate_data_op (unsigned long * instruction
,
21757 unsigned long value
)
21760 unsigned long negated
, inverted
;
21762 negated
= encode_arm_immediate (-value
);
21763 inverted
= encode_arm_immediate (~value
);
21765 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
21768 /* First negates. */
21769 case OPCODE_SUB
: /* ADD <-> SUB */
21770 new_inst
= OPCODE_ADD
;
21775 new_inst
= OPCODE_SUB
;
21779 case OPCODE_CMP
: /* CMP <-> CMN */
21780 new_inst
= OPCODE_CMN
;
21785 new_inst
= OPCODE_CMP
;
21789 /* Now Inverted ops. */
21790 case OPCODE_MOV
: /* MOV <-> MVN */
21791 new_inst
= OPCODE_MVN
;
21796 new_inst
= OPCODE_MOV
;
21800 case OPCODE_AND
: /* AND <-> BIC */
21801 new_inst
= OPCODE_BIC
;
21806 new_inst
= OPCODE_AND
;
21810 case OPCODE_ADC
: /* ADC <-> SBC */
21811 new_inst
= OPCODE_SBC
;
21816 new_inst
= OPCODE_ADC
;
21820 /* We cannot do anything. */
21825 if (value
== (unsigned) FAIL
)
21828 *instruction
&= OPCODE_MASK
;
21829 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
21833 /* Like negate_data_op, but for Thumb-2. */
21835 static unsigned int
21836 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
21840 unsigned int negated
, inverted
;
21842 negated
= encode_thumb32_immediate (-value
);
21843 inverted
= encode_thumb32_immediate (~value
);
21845 rd
= (*instruction
>> 8) & 0xf;
21846 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
21849 /* ADD <-> SUB. Includes CMP <-> CMN. */
21850 case T2_OPCODE_SUB
:
21851 new_inst
= T2_OPCODE_ADD
;
21855 case T2_OPCODE_ADD
:
21856 new_inst
= T2_OPCODE_SUB
;
21860 /* ORR <-> ORN. Includes MOV <-> MVN. */
21861 case T2_OPCODE_ORR
:
21862 new_inst
= T2_OPCODE_ORN
;
21866 case T2_OPCODE_ORN
:
21867 new_inst
= T2_OPCODE_ORR
;
21871 /* AND <-> BIC. TST has no inverted equivalent. */
21872 case T2_OPCODE_AND
:
21873 new_inst
= T2_OPCODE_BIC
;
21880 case T2_OPCODE_BIC
:
21881 new_inst
= T2_OPCODE_AND
;
21886 case T2_OPCODE_ADC
:
21887 new_inst
= T2_OPCODE_SBC
;
21891 case T2_OPCODE_SBC
:
21892 new_inst
= T2_OPCODE_ADC
;
21896 /* We cannot do anything. */
21901 if (value
== (unsigned int)FAIL
)
21904 *instruction
&= T2_OPCODE_MASK
;
21905 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
21909 /* Read a 32-bit thumb instruction from buf. */
21910 static unsigned long
21911 get_thumb32_insn (char * buf
)
21913 unsigned long insn
;
21914 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
21915 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
21921 /* We usually want to set the low bit on the address of thumb function
21922 symbols. In particular .word foo - . should have the low bit set.
21923 Generic code tries to fold the difference of two symbols to
21924 a constant. Prevent this and force a relocation when the first symbols
21925 is a thumb function. */
21928 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
21930 if (op
== O_subtract
21931 && l
->X_op
== O_symbol
21932 && r
->X_op
== O_symbol
21933 && THUMB_IS_FUNC (l
->X_add_symbol
))
21935 l
->X_op
= O_subtract
;
21936 l
->X_op_symbol
= r
->X_add_symbol
;
21937 l
->X_add_number
-= r
->X_add_number
;
21941 /* Process as normal. */
21945 /* Encode Thumb2 unconditional branches and calls. The encoding
21946 for the 2 are identical for the immediate values. */
21949 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
21951 #define T2I1I2MASK ((1 << 13) | (1 << 11))
21954 addressT S
, I1
, I2
, lo
, hi
;
21956 S
= (value
>> 24) & 0x01;
21957 I1
= (value
>> 23) & 0x01;
21958 I2
= (value
>> 22) & 0x01;
21959 hi
= (value
>> 12) & 0x3ff;
21960 lo
= (value
>> 1) & 0x7ff;
21961 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
21962 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
21963 newval
|= (S
<< 10) | hi
;
21964 newval2
&= ~T2I1I2MASK
;
21965 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
21966 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
21967 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
21971 md_apply_fix (fixS
* fixP
,
21975 offsetT value
= * valP
;
21977 unsigned int newimm
;
21978 unsigned long temp
;
21980 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
21982 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
21984 /* Note whether this will delete the relocation. */
21986 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
21989 /* On a 64-bit host, silently truncate 'value' to 32 bits for
21990 consistency with the behaviour on 32-bit hosts. Remember value
21992 value
&= 0xffffffff;
21993 value
^= 0x80000000;
21994 value
-= 0x80000000;
21997 fixP
->fx_addnumber
= value
;
21999 /* Same treatment for fixP->fx_offset. */
22000 fixP
->fx_offset
&= 0xffffffff;
22001 fixP
->fx_offset
^= 0x80000000;
22002 fixP
->fx_offset
-= 0x80000000;
22004 switch (fixP
->fx_r_type
)
22006 case BFD_RELOC_NONE
:
22007 /* This will need to go in the object file. */
22011 case BFD_RELOC_ARM_IMMEDIATE
:
22012 /* We claim that this fixup has been processed here,
22013 even if in fact we generate an error because we do
22014 not have a reloc for it, so tc_gen_reloc will reject it. */
22017 if (fixP
->fx_addsy
)
22019 const char *msg
= 0;
22021 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22022 msg
= _("undefined symbol %s used as an immediate value");
22023 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22024 msg
= _("symbol %s is in a different section");
22025 else if (S_IS_WEAK (fixP
->fx_addsy
))
22026 msg
= _("symbol %s is weak and may be overridden later");
22030 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22031 msg
, S_GET_NAME (fixP
->fx_addsy
));
22036 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22038 /* If the offset is negative, we should use encoding A2 for ADR. */
22039 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
22040 newimm
= negate_data_op (&temp
, value
);
22043 newimm
= encode_arm_immediate (value
);
22045 /* If the instruction will fail, see if we can fix things up by
22046 changing the opcode. */
22047 if (newimm
== (unsigned int) FAIL
)
22048 newimm
= negate_data_op (&temp
, value
);
22051 if (newimm
== (unsigned int) FAIL
)
22053 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22054 _("invalid constant (%lx) after fixup"),
22055 (unsigned long) value
);
22059 newimm
|= (temp
& 0xfffff000);
22060 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22063 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
22065 unsigned int highpart
= 0;
22066 unsigned int newinsn
= 0xe1a00000; /* nop. */
22068 if (fixP
->fx_addsy
)
22070 const char *msg
= 0;
22072 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22073 msg
= _("undefined symbol %s used as an immediate value");
22074 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22075 msg
= _("symbol %s is in a different section");
22076 else if (S_IS_WEAK (fixP
->fx_addsy
))
22077 msg
= _("symbol %s is weak and may be overridden later");
22081 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22082 msg
, S_GET_NAME (fixP
->fx_addsy
));
22087 newimm
= encode_arm_immediate (value
);
22088 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22090 /* If the instruction will fail, see if we can fix things up by
22091 changing the opcode. */
22092 if (newimm
== (unsigned int) FAIL
22093 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
22095 /* No ? OK - try using two ADD instructions to generate
22097 newimm
= validate_immediate_twopart (value
, & highpart
);
22099 /* Yes - then make sure that the second instruction is
22101 if (newimm
!= (unsigned int) FAIL
)
22103 /* Still No ? Try using a negated value. */
22104 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
22105 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
22106 /* Otherwise - give up. */
22109 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22110 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22115 /* Replace the first operand in the 2nd instruction (which
22116 is the PC) with the destination register. We have
22117 already added in the PC in the first instruction and we
22118 do not want to do it again. */
22119 newinsn
&= ~ 0xf0000;
22120 newinsn
|= ((newinsn
& 0x0f000) << 4);
22123 newimm
|= (temp
& 0xfffff000);
22124 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22126 highpart
|= (newinsn
& 0xfffff000);
22127 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
22131 case BFD_RELOC_ARM_OFFSET_IMM
:
22132 if (!fixP
->fx_done
&& seg
->use_rela_p
)
22135 case BFD_RELOC_ARM_LITERAL
:
22141 if (validate_offset_imm (value
, 0) == FAIL
)
22143 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
22144 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22145 _("invalid literal constant: pool needs to be closer"));
22147 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22148 _("bad immediate value for offset (%ld)"),
22153 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22155 newval
&= 0xfffff000;
22158 newval
&= 0xff7ff000;
22159 newval
|= value
| (sign
? INDEX_UP
: 0);
22161 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22164 case BFD_RELOC_ARM_OFFSET_IMM8
:
22165 case BFD_RELOC_ARM_HWLITERAL
:
22171 if (validate_offset_imm (value
, 1) == FAIL
)
22173 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
22174 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22175 _("invalid literal constant: pool needs to be closer"));
22177 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22178 _("bad immediate value for 8-bit offset (%ld)"),
22183 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22185 newval
&= 0xfffff0f0;
22188 newval
&= 0xff7ff0f0;
22189 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
22191 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22194 case BFD_RELOC_ARM_T32_OFFSET_U8
:
22195 if (value
< 0 || value
> 1020 || value
% 4 != 0)
22196 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22197 _("bad immediate value for offset (%ld)"), (long) value
);
22200 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
22202 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
22205 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22206 /* This is a complicated relocation used for all varieties of Thumb32
22207 load/store instruction with immediate offset:
22209 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22210 *4, optional writeback(W)
22211 (doubleword load/store)
22213 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22214 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22215 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22216 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22217 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22219 Uppercase letters indicate bits that are already encoded at
22220 this point. Lowercase letters are our problem. For the
22221 second block of instructions, the secondary opcode nybble
22222 (bits 8..11) is present, and bit 23 is zero, even if this is
22223 a PC-relative operation. */
22224 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22226 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
22228 if ((newval
& 0xf0000000) == 0xe0000000)
22230 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22232 newval
|= (1 << 23);
22235 if (value
% 4 != 0)
22237 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22238 _("offset not a multiple of 4"));
22244 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22245 _("offset out of range"));
22250 else if ((newval
& 0x000f0000) == 0x000f0000)
22252 /* PC-relative, 12-bit offset. */
22254 newval
|= (1 << 23);
22259 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22260 _("offset out of range"));
22265 else if ((newval
& 0x00000100) == 0x00000100)
22267 /* Writeback: 8-bit, +/- offset. */
22269 newval
|= (1 << 9);
22274 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22275 _("offset out of range"));
22280 else if ((newval
& 0x00000f00) == 0x00000e00)
22282 /* T-instruction: positive 8-bit offset. */
22283 if (value
< 0 || value
> 0xff)
22285 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22286 _("offset out of range"));
22294 /* Positive 12-bit or negative 8-bit offset. */
22298 newval
|= (1 << 23);
22308 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22309 _("offset out of range"));
22316 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
22317 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
22320 case BFD_RELOC_ARM_SHIFT_IMM
:
22321 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22322 if (((unsigned long) value
) > 32
22324 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
22326 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22327 _("shift expression is too large"));
22332 /* Shifts of zero must be done as lsl. */
22334 else if (value
== 32)
22336 newval
&= 0xfffff07f;
22337 newval
|= (value
& 0x1f) << 7;
22338 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22341 case BFD_RELOC_ARM_T32_IMMEDIATE
:
22342 case BFD_RELOC_ARM_T32_ADD_IMM
:
22343 case BFD_RELOC_ARM_T32_IMM12
:
22344 case BFD_RELOC_ARM_T32_ADD_PC12
:
22345 /* We claim that this fixup has been processed here,
22346 even if in fact we generate an error because we do
22347 not have a reloc for it, so tc_gen_reloc will reject it. */
22351 && ! S_IS_DEFINED (fixP
->fx_addsy
))
22353 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22354 _("undefined symbol %s used as an immediate value"),
22355 S_GET_NAME (fixP
->fx_addsy
));
22359 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22361 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
22364 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
22365 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22367 newimm
= encode_thumb32_immediate (value
);
22368 if (newimm
== (unsigned int) FAIL
)
22369 newimm
= thumb32_negate_data_op (&newval
, value
);
22371 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
22372 && newimm
== (unsigned int) FAIL
)
22374 /* Turn add/sum into addw/subw. */
22375 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22376 newval
= (newval
& 0xfeffffff) | 0x02000000;
22377 /* No flat 12-bit imm encoding for addsw/subsw. */
22378 if ((newval
& 0x00100000) == 0)
22380 /* 12 bit immediate for addw/subw. */
22384 newval
^= 0x00a00000;
22387 newimm
= (unsigned int) FAIL
;
22393 if (newimm
== (unsigned int)FAIL
)
22395 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22396 _("invalid constant (%lx) after fixup"),
22397 (unsigned long) value
);
22401 newval
|= (newimm
& 0x800) << 15;
22402 newval
|= (newimm
& 0x700) << 4;
22403 newval
|= (newimm
& 0x0ff);
22405 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
22406 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
22409 case BFD_RELOC_ARM_SMC
:
22410 if (((unsigned long) value
) > 0xffff)
22411 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22412 _("invalid smc expression"));
22413 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22414 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22415 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22418 case BFD_RELOC_ARM_HVC
:
22419 if (((unsigned long) value
) > 0xffff)
22420 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22421 _("invalid hvc expression"));
22422 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22423 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22424 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22427 case BFD_RELOC_ARM_SWI
:
22428 if (fixP
->tc_fix_data
!= 0)
22430 if (((unsigned long) value
) > 0xff)
22431 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22432 _("invalid swi expression"));
22433 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22435 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22439 if (((unsigned long) value
) > 0x00ffffff)
22440 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22441 _("invalid swi expression"));
22442 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22444 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22448 case BFD_RELOC_ARM_MULTI
:
22449 if (((unsigned long) value
) > 0xffff)
22450 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22451 _("invalid expression in load/store multiple"));
22452 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
22453 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22457 case BFD_RELOC_ARM_PCREL_CALL
:
22459 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22461 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22462 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22463 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22464 /* Flip the bl to blx. This is a simple flip
22465 bit here because we generate PCREL_CALL for
22466 unconditional bls. */
22468 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22469 newval
= newval
| 0x10000000;
22470 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22476 goto arm_branch_common
;
22478 case BFD_RELOC_ARM_PCREL_JUMP
:
22479 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22481 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22482 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22483 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22485 /* This would map to a bl<cond>, b<cond>,
22486 b<always> to a Thumb function. We
22487 need to force a relocation for this particular
22489 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22493 case BFD_RELOC_ARM_PLT32
:
22495 case BFD_RELOC_ARM_PCREL_BRANCH
:
22497 goto arm_branch_common
;
22499 case BFD_RELOC_ARM_PCREL_BLX
:
22502 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22504 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22505 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22506 && ARM_IS_FUNC (fixP
->fx_addsy
))
22508 /* Flip the blx to a bl and warn. */
22509 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
22510 newval
= 0xeb000000;
22511 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
22512 _("blx to '%s' an ARM ISA state function changed to bl"),
22514 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22520 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
22521 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
22525 /* We are going to store value (shifted right by two) in the
22526 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22527 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
22528 also be be clear. */
22530 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22531 _("misaligned branch destination"));
22532 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
22533 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
22534 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22536 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22538 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22539 newval
|= (value
>> 2) & 0x00ffffff;
22540 /* Set the H bit on BLX instructions. */
22544 newval
|= 0x01000000;
22546 newval
&= ~0x01000000;
22548 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22552 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
22553 /* CBZ can only branch forward. */
22555 /* Attempts to use CBZ to branch to the next instruction
22556 (which, strictly speaking, are prohibited) will be turned into
22559 FIXME: It may be better to remove the instruction completely and
22560 perform relaxation. */
22563 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22564 newval
= 0xbf00; /* NOP encoding T1 */
22565 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22570 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22572 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22574 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22575 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
22576 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22581 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
22582 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
22583 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22585 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22587 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22588 newval
|= (value
& 0x1ff) >> 1;
22589 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22593 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
22594 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
22595 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22597 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22599 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22600 newval
|= (value
& 0xfff) >> 1;
22601 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22605 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
22607 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22608 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22609 && ARM_IS_FUNC (fixP
->fx_addsy
)
22610 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22612 /* Force a relocation for a branch 20 bits wide. */
22615 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
22616 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22617 _("conditional branch out of range"));
22619 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22622 addressT S
, J1
, J2
, lo
, hi
;
22624 S
= (value
& 0x00100000) >> 20;
22625 J2
= (value
& 0x00080000) >> 19;
22626 J1
= (value
& 0x00040000) >> 18;
22627 hi
= (value
& 0x0003f000) >> 12;
22628 lo
= (value
& 0x00000ffe) >> 1;
22630 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22631 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22632 newval
|= (S
<< 10) | hi
;
22633 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
22634 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22635 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22639 case BFD_RELOC_THUMB_PCREL_BLX
:
22640 /* If there is a blx from a thumb state function to
22641 another thumb function flip this to a bl and warn
22645 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22646 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22647 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22649 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
22650 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
22651 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22653 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22654 newval
= newval
| 0x1000;
22655 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
22656 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
22661 goto thumb_bl_common
;
22663 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
22664 /* A bl from Thumb state ISA to an internal ARM state function
22665 is converted to a blx. */
22667 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22668 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22669 && ARM_IS_FUNC (fixP
->fx_addsy
)
22670 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22672 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22673 newval
= newval
& ~0x1000;
22674 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
22675 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
22681 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
22682 /* For a BLX instruction, make sure that the relocation is rounded up
22683 to a word boundary. This follows the semantics of the instruction
22684 which specifies that bit 1 of the target address will come from bit
22685 1 of the base address. */
22686 value
= (value
+ 3) & ~ 3;
22689 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
22690 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
22691 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
22694 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
22696 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_arch_t2
)))
22697 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22698 else if ((value
& ~0x1ffffff)
22699 && ((value
& ~0x1ffffff) != ~0x1ffffff))
22700 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22701 _("Thumb2 branch out of range"));
22704 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22705 encode_thumb2_b_bl_offset (buf
, value
);
22709 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
22710 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
22711 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22713 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22714 encode_thumb2_b_bl_offset (buf
, value
);
22719 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22724 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22725 md_number_to_chars (buf
, value
, 2);
22729 case BFD_RELOC_ARM_TLS_CALL
:
22730 case BFD_RELOC_ARM_THM_TLS_CALL
:
22731 case BFD_RELOC_ARM_TLS_DESCSEQ
:
22732 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
22733 case BFD_RELOC_ARM_TLS_GOTDESC
:
22734 case BFD_RELOC_ARM_TLS_GD32
:
22735 case BFD_RELOC_ARM_TLS_LE32
:
22736 case BFD_RELOC_ARM_TLS_IE32
:
22737 case BFD_RELOC_ARM_TLS_LDM32
:
22738 case BFD_RELOC_ARM_TLS_LDO32
:
22739 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
22742 case BFD_RELOC_ARM_GOT32
:
22743 case BFD_RELOC_ARM_GOTOFF
:
22746 case BFD_RELOC_ARM_GOT_PREL
:
22747 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22748 md_number_to_chars (buf
, value
, 4);
22751 case BFD_RELOC_ARM_TARGET2
:
22752 /* TARGET2 is not partial-inplace, so we need to write the
22753 addend here for REL targets, because it won't be written out
22754 during reloc processing later. */
22755 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22756 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
22760 case BFD_RELOC_RVA
:
22762 case BFD_RELOC_ARM_TARGET1
:
22763 case BFD_RELOC_ARM_ROSEGREL32
:
22764 case BFD_RELOC_ARM_SBREL32
:
22765 case BFD_RELOC_32_PCREL
:
22767 case BFD_RELOC_32_SECREL
:
22769 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22771 /* For WinCE we only do this for pcrel fixups. */
22772 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
22774 md_number_to_chars (buf
, value
, 4);
22778 case BFD_RELOC_ARM_PREL31
:
22779 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22781 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
22782 if ((value
^ (value
>> 1)) & 0x40000000)
22784 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22785 _("rel31 relocation overflow"));
22787 newval
|= value
& 0x7fffffff;
22788 md_number_to_chars (buf
, newval
, 4);
22793 case BFD_RELOC_ARM_CP_OFF_IMM
:
22794 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
22795 if (value
< -1023 || value
> 1023 || (value
& 3))
22796 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22797 _("co-processor offset out of range"));
22802 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
22803 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
22804 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22806 newval
= get_thumb32_insn (buf
);
22808 newval
&= 0xffffff00;
22811 newval
&= 0xff7fff00;
22812 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
22814 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
22815 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
22816 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22818 put_thumb32_insn (buf
, newval
);
22821 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
22822 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
22823 if (value
< -255 || value
> 255)
22824 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22825 _("co-processor offset out of range"));
22827 goto cp_off_common
;
22829 case BFD_RELOC_ARM_THUMB_OFFSET
:
22830 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22831 /* Exactly what ranges, and where the offset is inserted depends
22832 on the type of instruction, we can establish this from the
22834 switch (newval
>> 12)
22836 case 4: /* PC load. */
22837 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
22838 forced to zero for these loads; md_pcrel_from has already
22839 compensated for this. */
22841 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22842 _("invalid offset, target not word aligned (0x%08lX)"),
22843 (((unsigned long) fixP
->fx_frag
->fr_address
22844 + (unsigned long) fixP
->fx_where
) & ~3)
22845 + (unsigned long) value
);
22847 if (value
& ~0x3fc)
22848 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22849 _("invalid offset, value too big (0x%08lX)"),
22852 newval
|= value
>> 2;
22855 case 9: /* SP load/store. */
22856 if (value
& ~0x3fc)
22857 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22858 _("invalid offset, value too big (0x%08lX)"),
22860 newval
|= value
>> 2;
22863 case 6: /* Word load/store. */
22865 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22866 _("invalid offset, value too big (0x%08lX)"),
22868 newval
|= value
<< 4; /* 6 - 2. */
22871 case 7: /* Byte load/store. */
22873 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22874 _("invalid offset, value too big (0x%08lX)"),
22876 newval
|= value
<< 6;
22879 case 8: /* Halfword load/store. */
22881 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22882 _("invalid offset, value too big (0x%08lX)"),
22884 newval
|= value
<< 5; /* 6 - 1. */
22888 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22889 "Unable to process relocation for thumb opcode: %lx",
22890 (unsigned long) newval
);
22893 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22896 case BFD_RELOC_ARM_THUMB_ADD
:
22897 /* This is a complicated relocation, since we use it for all of
22898 the following immediate relocations:
22902 9bit ADD/SUB SP word-aligned
22903 10bit ADD PC/SP word-aligned
22905 The type of instruction being processed is encoded in the
22912 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22914 int rd
= (newval
>> 4) & 0xf;
22915 int rs
= newval
& 0xf;
22916 int subtract
= !!(newval
& 0x8000);
22918 /* Check for HI regs, only very restricted cases allowed:
22919 Adjusting SP, and using PC or SP to get an address. */
22920 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
22921 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
22922 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22923 _("invalid Hi register with immediate"));
22925 /* If value is negative, choose the opposite instruction. */
22929 subtract
= !subtract
;
22931 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22932 _("immediate value out of range"));
22937 if (value
& ~0x1fc)
22938 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22939 _("invalid immediate for stack address calculation"));
22940 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
22941 newval
|= value
>> 2;
22943 else if (rs
== REG_PC
|| rs
== REG_SP
)
22945 if (subtract
|| value
& ~0x3fc)
22946 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22947 _("invalid immediate for address calculation (value = 0x%08lX)"),
22948 (unsigned long) value
);
22949 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
22951 newval
|= value
>> 2;
22956 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22957 _("immediate value out of range"));
22958 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
22959 newval
|= (rd
<< 8) | value
;
22964 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22965 _("immediate value out of range"));
22966 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
22967 newval
|= rd
| (rs
<< 3) | (value
<< 6);
22970 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22973 case BFD_RELOC_ARM_THUMB_IMM
:
22974 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22975 if (value
< 0 || value
> 255)
22976 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22977 _("invalid immediate: %ld is out of range"),
22980 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22983 case BFD_RELOC_ARM_THUMB_SHIFT
:
22984 /* 5bit shift value (0..32). LSL cannot take 32. */
22985 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
22986 temp
= newval
& 0xf800;
22987 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
22988 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22989 _("invalid shift value: %ld"), (long) value
);
22990 /* Shifts of zero must be encoded as LSL. */
22992 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
22993 /* Shifts of 32 are encoded as zero. */
22994 else if (value
== 32)
22996 newval
|= value
<< 6;
22997 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23000 case BFD_RELOC_VTABLE_INHERIT
:
23001 case BFD_RELOC_VTABLE_ENTRY
:
23005 case BFD_RELOC_ARM_MOVW
:
23006 case BFD_RELOC_ARM_MOVT
:
23007 case BFD_RELOC_ARM_THUMB_MOVW
:
23008 case BFD_RELOC_ARM_THUMB_MOVT
:
23009 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23011 /* REL format relocations are limited to a 16-bit addend. */
23012 if (!fixP
->fx_done
)
23014 if (value
< -0x8000 || value
> 0x7fff)
23015 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23016 _("offset out of range"));
23018 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
23019 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23024 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
23025 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23027 newval
= get_thumb32_insn (buf
);
23028 newval
&= 0xfbf08f00;
23029 newval
|= (value
& 0xf000) << 4;
23030 newval
|= (value
& 0x0800) << 15;
23031 newval
|= (value
& 0x0700) << 4;
23032 newval
|= (value
& 0x00ff);
23033 put_thumb32_insn (buf
, newval
);
23037 newval
= md_chars_to_number (buf
, 4);
23038 newval
&= 0xfff0f000;
23039 newval
|= value
& 0x0fff;
23040 newval
|= (value
& 0xf000) << 4;
23041 md_number_to_chars (buf
, newval
, 4);
23046 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23047 case BFD_RELOC_ARM_ALU_PC_G0
:
23048 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23049 case BFD_RELOC_ARM_ALU_PC_G1
:
23050 case BFD_RELOC_ARM_ALU_PC_G2
:
23051 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23052 case BFD_RELOC_ARM_ALU_SB_G0
:
23053 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23054 case BFD_RELOC_ARM_ALU_SB_G1
:
23055 case BFD_RELOC_ARM_ALU_SB_G2
:
23056 gas_assert (!fixP
->fx_done
);
23057 if (!seg
->use_rela_p
)
23060 bfd_vma encoded_addend
;
23061 bfd_vma addend_abs
= abs (value
);
23063 /* Check that the absolute value of the addend can be
23064 expressed as an 8-bit constant plus a rotation. */
23065 encoded_addend
= encode_arm_immediate (addend_abs
);
23066 if (encoded_addend
== (unsigned int) FAIL
)
23067 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23068 _("the offset 0x%08lX is not representable"),
23069 (unsigned long) addend_abs
);
23071 /* Extract the instruction. */
23072 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23074 /* If the addend is positive, use an ADD instruction.
23075 Otherwise use a SUB. Take care not to destroy the S bit. */
23076 insn
&= 0xff1fffff;
23082 /* Place the encoded addend into the first 12 bits of the
23084 insn
&= 0xfffff000;
23085 insn
|= encoded_addend
;
23087 /* Update the instruction. */
23088 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23092 case BFD_RELOC_ARM_LDR_PC_G0
:
23093 case BFD_RELOC_ARM_LDR_PC_G1
:
23094 case BFD_RELOC_ARM_LDR_PC_G2
:
23095 case BFD_RELOC_ARM_LDR_SB_G0
:
23096 case BFD_RELOC_ARM_LDR_SB_G1
:
23097 case BFD_RELOC_ARM_LDR_SB_G2
:
23098 gas_assert (!fixP
->fx_done
);
23099 if (!seg
->use_rela_p
)
23102 bfd_vma addend_abs
= abs (value
);
23104 /* Check that the absolute value of the addend can be
23105 encoded in 12 bits. */
23106 if (addend_abs
>= 0x1000)
23107 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23108 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23109 (unsigned long) addend_abs
);
23111 /* Extract the instruction. */
23112 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23114 /* If the addend is negative, clear bit 23 of the instruction.
23115 Otherwise set it. */
23117 insn
&= ~(1 << 23);
23121 /* Place the absolute value of the addend into the first 12 bits
23122 of the instruction. */
23123 insn
&= 0xfffff000;
23124 insn
|= addend_abs
;
23126 /* Update the instruction. */
23127 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23131 case BFD_RELOC_ARM_LDRS_PC_G0
:
23132 case BFD_RELOC_ARM_LDRS_PC_G1
:
23133 case BFD_RELOC_ARM_LDRS_PC_G2
:
23134 case BFD_RELOC_ARM_LDRS_SB_G0
:
23135 case BFD_RELOC_ARM_LDRS_SB_G1
:
23136 case BFD_RELOC_ARM_LDRS_SB_G2
:
23137 gas_assert (!fixP
->fx_done
);
23138 if (!seg
->use_rela_p
)
23141 bfd_vma addend_abs
= abs (value
);
23143 /* Check that the absolute value of the addend can be
23144 encoded in 8 bits. */
23145 if (addend_abs
>= 0x100)
23146 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23147 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23148 (unsigned long) addend_abs
);
23150 /* Extract the instruction. */
23151 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23153 /* If the addend is negative, clear bit 23 of the instruction.
23154 Otherwise set it. */
23156 insn
&= ~(1 << 23);
23160 /* Place the first four bits of the absolute value of the addend
23161 into the first 4 bits of the instruction, and the remaining
23162 four into bits 8 .. 11. */
23163 insn
&= 0xfffff0f0;
23164 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
23166 /* Update the instruction. */
23167 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23171 case BFD_RELOC_ARM_LDC_PC_G0
:
23172 case BFD_RELOC_ARM_LDC_PC_G1
:
23173 case BFD_RELOC_ARM_LDC_PC_G2
:
23174 case BFD_RELOC_ARM_LDC_SB_G0
:
23175 case BFD_RELOC_ARM_LDC_SB_G1
:
23176 case BFD_RELOC_ARM_LDC_SB_G2
:
23177 gas_assert (!fixP
->fx_done
);
23178 if (!seg
->use_rela_p
)
23181 bfd_vma addend_abs
= abs (value
);
23183 /* Check that the absolute value of the addend is a multiple of
23184 four and, when divided by four, fits in 8 bits. */
23185 if (addend_abs
& 0x3)
23186 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23187 _("bad offset 0x%08lX (must be word-aligned)"),
23188 (unsigned long) addend_abs
);
23190 if ((addend_abs
>> 2) > 0xff)
23191 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23192 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23193 (unsigned long) addend_abs
);
23195 /* Extract the instruction. */
23196 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23198 /* If the addend is negative, clear bit 23 of the instruction.
23199 Otherwise set it. */
23201 insn
&= ~(1 << 23);
23205 /* Place the addend (divided by four) into the first eight
23206 bits of the instruction. */
23207 insn
&= 0xfffffff0;
23208 insn
|= addend_abs
>> 2;
23210 /* Update the instruction. */
23211 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23215 case BFD_RELOC_ARM_V4BX
:
23216 /* This will need to go in the object file. */
23220 case BFD_RELOC_UNUSED
:
23222 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23223 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
23227 /* Translate internal representation of relocation info to BFD target
23231 tc_gen_reloc (asection
*section
, fixS
*fixp
)
23234 bfd_reloc_code_real_type code
;
23236 reloc
= (arelent
*) xmalloc (sizeof (arelent
));
23238 reloc
->sym_ptr_ptr
= (asymbol
**) xmalloc (sizeof (asymbol
*));
23239 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
23240 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
23242 if (fixp
->fx_pcrel
)
23244 if (section
->use_rela_p
)
23245 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
23247 fixp
->fx_offset
= reloc
->address
;
23249 reloc
->addend
= fixp
->fx_offset
;
23251 switch (fixp
->fx_r_type
)
23254 if (fixp
->fx_pcrel
)
23256 code
= BFD_RELOC_8_PCREL
;
23261 if (fixp
->fx_pcrel
)
23263 code
= BFD_RELOC_16_PCREL
;
23268 if (fixp
->fx_pcrel
)
23270 code
= BFD_RELOC_32_PCREL
;
23274 case BFD_RELOC_ARM_MOVW
:
23275 if (fixp
->fx_pcrel
)
23277 code
= BFD_RELOC_ARM_MOVW_PCREL
;
23281 case BFD_RELOC_ARM_MOVT
:
23282 if (fixp
->fx_pcrel
)
23284 code
= BFD_RELOC_ARM_MOVT_PCREL
;
23288 case BFD_RELOC_ARM_THUMB_MOVW
:
23289 if (fixp
->fx_pcrel
)
23291 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
23295 case BFD_RELOC_ARM_THUMB_MOVT
:
23296 if (fixp
->fx_pcrel
)
23298 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
23302 case BFD_RELOC_NONE
:
23303 case BFD_RELOC_ARM_PCREL_BRANCH
:
23304 case BFD_RELOC_ARM_PCREL_BLX
:
23305 case BFD_RELOC_RVA
:
23306 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
23307 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
23308 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
23309 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23310 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23311 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23312 case BFD_RELOC_VTABLE_ENTRY
:
23313 case BFD_RELOC_VTABLE_INHERIT
:
23315 case BFD_RELOC_32_SECREL
:
23317 code
= fixp
->fx_r_type
;
23320 case BFD_RELOC_THUMB_PCREL_BLX
:
23322 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23323 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23326 code
= BFD_RELOC_THUMB_PCREL_BLX
;
23329 case BFD_RELOC_ARM_LITERAL
:
23330 case BFD_RELOC_ARM_HWLITERAL
:
23331 /* If this is called then the a literal has
23332 been referenced across a section boundary. */
23333 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23334 _("literal referenced across section boundary"));
23338 case BFD_RELOC_ARM_TLS_CALL
:
23339 case BFD_RELOC_ARM_THM_TLS_CALL
:
23340 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23341 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23342 case BFD_RELOC_ARM_GOT32
:
23343 case BFD_RELOC_ARM_GOTOFF
:
23344 case BFD_RELOC_ARM_GOT_PREL
:
23345 case BFD_RELOC_ARM_PLT32
:
23346 case BFD_RELOC_ARM_TARGET1
:
23347 case BFD_RELOC_ARM_ROSEGREL32
:
23348 case BFD_RELOC_ARM_SBREL32
:
23349 case BFD_RELOC_ARM_PREL31
:
23350 case BFD_RELOC_ARM_TARGET2
:
23351 case BFD_RELOC_ARM_TLS_LE32
:
23352 case BFD_RELOC_ARM_TLS_LDO32
:
23353 case BFD_RELOC_ARM_PCREL_CALL
:
23354 case BFD_RELOC_ARM_PCREL_JUMP
:
23355 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23356 case BFD_RELOC_ARM_ALU_PC_G0
:
23357 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23358 case BFD_RELOC_ARM_ALU_PC_G1
:
23359 case BFD_RELOC_ARM_ALU_PC_G2
:
23360 case BFD_RELOC_ARM_LDR_PC_G0
:
23361 case BFD_RELOC_ARM_LDR_PC_G1
:
23362 case BFD_RELOC_ARM_LDR_PC_G2
:
23363 case BFD_RELOC_ARM_LDRS_PC_G0
:
23364 case BFD_RELOC_ARM_LDRS_PC_G1
:
23365 case BFD_RELOC_ARM_LDRS_PC_G2
:
23366 case BFD_RELOC_ARM_LDC_PC_G0
:
23367 case BFD_RELOC_ARM_LDC_PC_G1
:
23368 case BFD_RELOC_ARM_LDC_PC_G2
:
23369 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23370 case BFD_RELOC_ARM_ALU_SB_G0
:
23371 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23372 case BFD_RELOC_ARM_ALU_SB_G1
:
23373 case BFD_RELOC_ARM_ALU_SB_G2
:
23374 case BFD_RELOC_ARM_LDR_SB_G0
:
23375 case BFD_RELOC_ARM_LDR_SB_G1
:
23376 case BFD_RELOC_ARM_LDR_SB_G2
:
23377 case BFD_RELOC_ARM_LDRS_SB_G0
:
23378 case BFD_RELOC_ARM_LDRS_SB_G1
:
23379 case BFD_RELOC_ARM_LDRS_SB_G2
:
23380 case BFD_RELOC_ARM_LDC_SB_G0
:
23381 case BFD_RELOC_ARM_LDC_SB_G1
:
23382 case BFD_RELOC_ARM_LDC_SB_G2
:
23383 case BFD_RELOC_ARM_V4BX
:
23384 code
= fixp
->fx_r_type
;
23387 case BFD_RELOC_ARM_TLS_GOTDESC
:
23388 case BFD_RELOC_ARM_TLS_GD32
:
23389 case BFD_RELOC_ARM_TLS_IE32
:
23390 case BFD_RELOC_ARM_TLS_LDM32
:
23391 /* BFD will include the symbol's address in the addend.
23392 But we don't want that, so subtract it out again here. */
23393 if (!S_IS_COMMON (fixp
->fx_addsy
))
23394 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
23395 code
= fixp
->fx_r_type
;
23399 case BFD_RELOC_ARM_IMMEDIATE
:
23400 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23401 _("internal relocation (type: IMMEDIATE) not fixed up"));
23404 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
23405 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23406 _("ADRL used for a symbol not defined in the same file"));
23409 case BFD_RELOC_ARM_OFFSET_IMM
:
23410 if (section
->use_rela_p
)
23412 code
= fixp
->fx_r_type
;
23416 if (fixp
->fx_addsy
!= NULL
23417 && !S_IS_DEFINED (fixp
->fx_addsy
)
23418 && S_IS_LOCAL (fixp
->fx_addsy
))
23420 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23421 _("undefined local label `%s'"),
23422 S_GET_NAME (fixp
->fx_addsy
));
23426 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23427 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
23434 switch (fixp
->fx_r_type
)
23436 case BFD_RELOC_NONE
: type
= "NONE"; break;
23437 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
23438 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
23439 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
23440 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
23441 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
23442 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
23443 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
23444 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
23445 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
23446 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
23447 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
23448 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
23449 default: type
= _("<unknown>"); break;
23451 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23452 _("cannot represent %s relocation in this object file format"),
23459 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
23461 && fixp
->fx_addsy
== GOT_symbol
)
23463 code
= BFD_RELOC_ARM_GOTPC
;
23464 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
23468 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
23470 if (reloc
->howto
== NULL
)
23472 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23473 _("cannot represent %s relocation in this object file format"),
23474 bfd_get_reloc_code_name (code
));
23478 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
23479 vtable entry to be used in the relocation's section offset. */
23480 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
23481 reloc
->address
= fixp
->fx_offset
;
23486 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23489 cons_fix_new_arm (fragS
* frag
,
23493 bfd_reloc_code_real_type reloc
)
23498 FIXME: @@ Should look at CPU word size. */
23502 reloc
= BFD_RELOC_8
;
23505 reloc
= BFD_RELOC_16
;
23509 reloc
= BFD_RELOC_32
;
23512 reloc
= BFD_RELOC_64
;
23517 if (exp
->X_op
== O_secrel
)
23519 exp
->X_op
= O_symbol
;
23520 reloc
= BFD_RELOC_32_SECREL
;
23524 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
23527 #if defined (OBJ_COFF)
23529 arm_validate_fix (fixS
* fixP
)
23531 /* If the destination of the branch is a defined symbol which does not have
23532 the THUMB_FUNC attribute, then we must be calling a function which has
23533 the (interfacearm) attribute. We look for the Thumb entry point to that
23534 function and change the branch to refer to that function instead. */
23535 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
23536 && fixP
->fx_addsy
!= NULL
23537 && S_IS_DEFINED (fixP
->fx_addsy
)
23538 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
23540 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
23547 arm_force_relocation (struct fix
* fixp
)
23549 #if defined (OBJ_COFF) && defined (TE_PE)
23550 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
23554 /* In case we have a call or a branch to a function in ARM ISA mode from
23555 a thumb function or vice-versa force the relocation. These relocations
23556 are cleared off for some cores that might have blx and simple transformations
23560 switch (fixp
->fx_r_type
)
23562 case BFD_RELOC_ARM_PCREL_JUMP
:
23563 case BFD_RELOC_ARM_PCREL_CALL
:
23564 case BFD_RELOC_THUMB_PCREL_BLX
:
23565 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
23569 case BFD_RELOC_ARM_PCREL_BLX
:
23570 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23571 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23572 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23573 if (ARM_IS_FUNC (fixp
->fx_addsy
))
23582 /* Resolve these relocations even if the symbol is extern or weak.
23583 Technically this is probably wrong due to symbol preemption.
23584 In practice these relocations do not have enough range to be useful
23585 at dynamic link time, and some code (e.g. in the Linux kernel)
23586 expects these references to be resolved. */
23587 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
23588 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
23589 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
23590 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
23591 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23592 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
23593 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
23594 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
23595 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
23596 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
23597 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
23598 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
23599 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
23600 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
23603 /* Always leave these relocations for the linker. */
23604 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
23605 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
23606 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
23609 /* Always generate relocations against function symbols. */
23610 if (fixp
->fx_r_type
== BFD_RELOC_32
23612 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
23615 return generic_force_reloc (fixp
);
23618 #if defined (OBJ_ELF) || defined (OBJ_COFF)
23619 /* Relocations against function names must be left unadjusted,
23620 so that the linker can use this information to generate interworking
23621 stubs. The MIPS version of this function
23622 also prevents relocations that are mips-16 specific, but I do not
23623 know why it does this.
23626 There is one other problem that ought to be addressed here, but
23627 which currently is not: Taking the address of a label (rather
23628 than a function) and then later jumping to that address. Such
23629 addresses also ought to have their bottom bit set (assuming that
23630 they reside in Thumb code), but at the moment they will not. */
23633 arm_fix_adjustable (fixS
* fixP
)
23635 if (fixP
->fx_addsy
== NULL
)
23638 /* Preserve relocations against symbols with function type. */
23639 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
23642 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
23643 && fixP
->fx_subsy
== NULL
)
23646 /* We need the symbol name for the VTABLE entries. */
23647 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
23648 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
23651 /* Don't allow symbols to be discarded on GOT related relocs. */
23652 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
23653 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
23654 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
23655 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
23656 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
23657 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
23658 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
23659 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
23660 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
23661 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
23662 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
23663 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
23664 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
23665 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
23668 /* Similarly for group relocations. */
23669 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
23670 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
23671 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
23674 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
23675 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
23676 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
23677 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
23678 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
23679 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
23680 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
23681 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
23682 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
23687 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
23692 elf32_arm_target_format (void)
23695 return (target_big_endian
23696 ? "elf32-bigarm-symbian"
23697 : "elf32-littlearm-symbian");
23698 #elif defined (TE_VXWORKS)
23699 return (target_big_endian
23700 ? "elf32-bigarm-vxworks"
23701 : "elf32-littlearm-vxworks");
23702 #elif defined (TE_NACL)
23703 return (target_big_endian
23704 ? "elf32-bigarm-nacl"
23705 : "elf32-littlearm-nacl");
23707 if (target_big_endian
)
23708 return "elf32-bigarm";
23710 return "elf32-littlearm";
23715 armelf_frob_symbol (symbolS
* symp
,
23718 elf_frob_symbol (symp
, puntp
);
23722 /* MD interface: Finalization. */
23727 literal_pool
* pool
;
23729 /* Ensure that all the IT blocks are properly closed. */
23730 check_it_blocks_finished ();
23732 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
23734 /* Put it at the end of the relevant section. */
23735 subseg_set (pool
->section
, pool
->sub_section
);
23737 arm_elf_change_section ();
23744 /* Remove any excess mapping symbols generated for alignment frags in
23745 SEC. We may have created a mapping symbol before a zero byte
23746 alignment; remove it if there's a mapping symbol after the
23749 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
23750 void *dummy ATTRIBUTE_UNUSED
)
23752 segment_info_type
*seginfo
= seg_info (sec
);
23755 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
23758 for (fragp
= seginfo
->frchainP
->frch_root
;
23760 fragp
= fragp
->fr_next
)
23762 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
23763 fragS
*next
= fragp
->fr_next
;
23765 /* Variable-sized frags have been converted to fixed size by
23766 this point. But if this was variable-sized to start with,
23767 there will be a fixed-size frag after it. So don't handle
23769 if (sym
== NULL
|| next
== NULL
)
23772 if (S_GET_VALUE (sym
) < next
->fr_address
)
23773 /* Not at the end of this frag. */
23775 know (S_GET_VALUE (sym
) == next
->fr_address
);
23779 if (next
->tc_frag_data
.first_map
!= NULL
)
23781 /* Next frag starts with a mapping symbol. Discard this
23783 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
23787 if (next
->fr_next
== NULL
)
23789 /* This mapping symbol is at the end of the section. Discard
23791 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
23792 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
23796 /* As long as we have empty frags without any mapping symbols,
23798 /* If the next frag is non-empty and does not start with a
23799 mapping symbol, then this mapping symbol is required. */
23800 if (next
->fr_address
!= next
->fr_next
->fr_address
)
23803 next
= next
->fr_next
;
23805 while (next
!= NULL
);
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones, for both the COFF and the ELF output formats.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char      bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
23892 /* MD interface: Initialization. */
23895 set_constant_flonums (void)
23899 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
23900 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
23904 /* Auto-select Thumb mode if it's the only available instruction set for the
23905 given architecture. */
23908 autoselect_thumb_from_cpu_variant (void)
23910 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
23911 opcode_select (16);
23920 if ( (arm_ops_hsh
= hash_new ()) == NULL
23921 || (arm_cond_hsh
= hash_new ()) == NULL
23922 || (arm_shift_hsh
= hash_new ()) == NULL
23923 || (arm_psr_hsh
= hash_new ()) == NULL
23924 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
23925 || (arm_reg_hsh
= hash_new ()) == NULL
23926 || (arm_reloc_hsh
= hash_new ()) == NULL
23927 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
23928 as_fatal (_("virtual memory exhausted"));
23930 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
23931 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
23932 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
23933 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
23934 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
23935 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
23936 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
23937 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
23938 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
23939 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
23940 (void *) (v7m_psrs
+ i
));
23941 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
23942 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
23944 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
23946 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
23947 (void *) (barrier_opt_names
+ i
));
23949 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
23951 struct reloc_entry
* entry
= reloc_names
+ i
;
23953 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
23954 /* This makes encode_branch() use the EABI versions of this relocation. */
23955 entry
->reloc
= BFD_RELOC_UNUSED
;
23957 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
23961 set_constant_flonums ();
23963 /* Set the cpu variant based on the command-line options. We prefer
23964 -mcpu= over -march= if both are set (as for GCC); and we prefer
23965 -mfpu= over any other way of setting the floating point unit.
23966 Use of legacy options with new options are faulted. */
23969 if (mcpu_cpu_opt
|| march_cpu_opt
)
23970 as_bad (_("use of old and new-style options to set CPU type"));
23972 mcpu_cpu_opt
= legacy_cpu
;
23974 else if (!mcpu_cpu_opt
)
23975 mcpu_cpu_opt
= march_cpu_opt
;
23980 as_bad (_("use of old and new-style options to set FPU type"));
23982 mfpu_opt
= legacy_fpu
;
23984 else if (!mfpu_opt
)
23986 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
23987 || defined (TE_NetBSD) || defined (TE_VXWORKS))
23988 /* Some environments specify a default FPU. If they don't, infer it
23989 from the processor. */
23991 mfpu_opt
= mcpu_fpu_opt
;
23993 mfpu_opt
= march_fpu_opt
;
23995 mfpu_opt
= &fpu_default
;
24001 if (mcpu_cpu_opt
!= NULL
)
24002 mfpu_opt
= &fpu_default
;
24003 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
24004 mfpu_opt
= &fpu_arch_vfp_v2
;
24006 mfpu_opt
= &fpu_arch_fpa
;
24012 mcpu_cpu_opt
= &cpu_default
;
24013 selected_cpu
= cpu_default
;
24015 else if (no_cpu_selected ())
24016 selected_cpu
= cpu_default
;
24019 selected_cpu
= *mcpu_cpu_opt
;
24021 mcpu_cpu_opt
= &arm_arch_any
;
24024 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
24026 autoselect_thumb_from_cpu_variant ();
24028 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
24030 #if defined OBJ_COFF || defined OBJ_ELF
24032 unsigned int flags
= 0;
24034 #if defined OBJ_ELF
24035 flags
= meabi_flags
;
24037 switch (meabi_flags
)
24039 case EF_ARM_EABI_UNKNOWN
:
24041 /* Set the flags in the private structure. */
24042 if (uses_apcs_26
) flags
|= F_APCS26
;
24043 if (support_interwork
) flags
|= F_INTERWORK
;
24044 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
24045 if (pic_code
) flags
|= F_PIC
;
24046 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
24047 flags
|= F_SOFT_FLOAT
;
24049 switch (mfloat_abi_opt
)
24051 case ARM_FLOAT_ABI_SOFT
:
24052 case ARM_FLOAT_ABI_SOFTFP
:
24053 flags
|= F_SOFT_FLOAT
;
24056 case ARM_FLOAT_ABI_HARD
:
24057 if (flags
& F_SOFT_FLOAT
)
24058 as_bad (_("hard-float conflicts with specified fpu"));
24062 /* Using pure-endian doubles (even if soft-float). */
24063 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
24064 flags
|= F_VFP_FLOAT
;
24066 #if defined OBJ_ELF
24067 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
24068 flags
|= EF_ARM_MAVERICK_FLOAT
;
24071 case EF_ARM_EABI_VER4
:
24072 case EF_ARM_EABI_VER5
:
24073 /* No additional flags to set. */
24080 bfd_set_private_flags (stdoutput
, flags
);
24082 /* We have run out flags in the COFF header to encode the
24083 status of ATPCS support, so instead we create a dummy,
24084 empty, debug section called .arm.atpcs. */
24089 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
24093 bfd_set_section_flags
24094 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
24095 bfd_set_section_size (stdoutput
, sec
, 0);
24096 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
24102 /* Record the CPU type as well. */
24103 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
24104 mach
= bfd_mach_arm_iWMMXt2
;
24105 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
24106 mach
= bfd_mach_arm_iWMMXt
;
24107 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
24108 mach
= bfd_mach_arm_XScale
;
24109 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
24110 mach
= bfd_mach_arm_ep9312
;
24111 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
24112 mach
= bfd_mach_arm_5TE
;
24113 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
24115 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24116 mach
= bfd_mach_arm_5T
;
24118 mach
= bfd_mach_arm_5
;
24120 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
24122 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24123 mach
= bfd_mach_arm_4T
;
24125 mach
= bfd_mach_arm_4
;
24127 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
24128 mach
= bfd_mach_arm_3M
;
24129 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
24130 mach
= bfd_mach_arm_3
;
24131 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
24132 mach
= bfd_mach_arm_2a
;
24133 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
24134 mach
= bfd_mach_arm_2
;
24136 mach
= bfd_mach_arm_unknown
;
24138 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
24141 /* Command line processing. */
24144 Invocation line includes a switch not recognized by the base assembler.
24145 See if it's a processor-specific option.
24147 This routine is somewhat complicated by the need for backwards
24148 compatibility (since older releases of gcc can't be changed).
24149 The new options try to make the interface as compatible as
24152 New options (supported) are:
24154 -mcpu=<cpu name> Assemble for selected processor
24155 -march=<architecture name> Assemble for selected architecture
24156 -mfpu=<fpu architecture> Assemble for selected FPU.
24157 -EB/-mbig-endian Big-endian
24158 -EL/-mlittle-endian Little-endian
24159 -k Generate PIC code
24160 -mthumb Start in Thumb mode
24161 -mthumb-interwork Code supports ARM/Thumb interworking
24163 -m[no-]warn-deprecated Warn about deprecated features
24164 -m[no-]warn-syms Warn when symbols match instructions
24166 For now we will also provide support for:
24168 -mapcs-32 32-bit Program counter
24169 -mapcs-26 26-bit Program counter
24170 -mapcs-float Floats passed in FP registers
24171 -mapcs-reentrant Reentrant code
24173 (sometime these will probably be replaced with -mapcs=<list of options>
24174 and -matpcs=<list of options>)
24176 The remaining options are only supported for backwards compatibility.
24177 Cpu variants, the arm part is optional:
24178 -m[arm]1 Currently not supported.
24179 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24180 -m[arm]3 Arm 3 processor
24181 -m[arm]6[xx], Arm 6 processors
24182 -m[arm]7[xx][t][[d]m] Arm 7 processors
24183 -m[arm]8[10] Arm 8 processors
24184 -m[arm]9[20][tdmi] Arm 9 processors
24185 -mstrongarm[110[0]] StrongARM processors
24186 -mxscale XScale processors
24187 -m[arm]v[2345[t[e]]] Arm architectures
24188 -mall All (except the ARM1)
24190 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24191 -mfpe-old (No float load/store multiples)
24192 -mvfpxd VFP Single precision
24194 -mno-fpu Disable all floating point instructions
24196 The following CPU names are recognized:
24197 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24198 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24199 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24200 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24201 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24202 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24203 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24207 const char * md_shortopts
= "m:k";
24209 #ifdef ARM_BI_ENDIAN
24210 #define OPTION_EB (OPTION_MD_BASE + 0)
24211 #define OPTION_EL (OPTION_MD_BASE + 1)
24213 #if TARGET_BYTES_BIG_ENDIAN
24214 #define OPTION_EB (OPTION_MD_BASE + 0)
24216 #define OPTION_EL (OPTION_MD_BASE + 1)
24219 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
24221 struct option md_longopts
[] =
24224 {"EB", no_argument
, NULL
, OPTION_EB
},
24227 {"EL", no_argument
, NULL
, OPTION_EL
},
24229 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
24230 {NULL
, no_argument
, NULL
, 0}
24234 size_t md_longopts_size
= sizeof (md_longopts
);
/* One entry per simple on/off command-line option: setting the option
   stores VALUE into *VAR.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.  */
  int   value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
24245 struct arm_option_table arm_opts
[] =
24247 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
24248 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
24249 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24250 &support_interwork
, 1, NULL
},
24251 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
24252 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
24253 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
24255 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
24256 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
24257 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
24258 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
24261 /* These are recognized by the assembler, but have no affect on code. */
24262 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
24263 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
24265 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
24266 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24267 &warn_on_deprecated
, 0, NULL
},
24268 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
24269 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
24270 {NULL
, NULL
, NULL
, 0, NULL
}
24273 struct arm_legacy_option_table
24275 char *option
; /* Option name to match. */
24276 const arm_feature_set
**var
; /* Variable to change. */
24277 const arm_feature_set value
; /* What to change it to. */
24278 char *deprecated
; /* If non-null, print this message. */
24281 const struct arm_legacy_option_table arm_legacy_opts
[] =
24283 /* DON'T add any new processors to this list -- we want the whole list
24284 to go away... Add them to the processors table instead. */
24285 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24286 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24287 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24288 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24289 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24290 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24291 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24292 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24293 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24294 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24295 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24296 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24297 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24298 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24299 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24300 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24301 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24302 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24303 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24304 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24305 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24306 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24307 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24308 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24309 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24310 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24311 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24312 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24313 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24314 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24315 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24316 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24317 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24318 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24319 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24320 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24321 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24322 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24323 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24324 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24325 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24326 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24327 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
24328 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
24329 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
24330 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
24331 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24332 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24333 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24334 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24335 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
24336 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
24337 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
24338 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
24339 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
24340 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
24341 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
24342 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
24343 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
24344 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
24345 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
24346 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
24347 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
24348 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
24349 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
24350 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
24351 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
24352 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
24353 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
24354 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
24355 N_("use -mcpu=strongarm110")},
24356 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
24357 N_("use -mcpu=strongarm1100")},
24358 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
24359 N_("use -mcpu=strongarm1110")},
24360 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
24361 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
24362 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
24364 /* Architecture variants -- don't add any more to this list either. */
24365 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
24366 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
24367 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
24368 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
24369 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
24370 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
24371 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
24372 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
24373 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
24374 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
24375 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
24376 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
24377 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
24378 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
24379 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
24380 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
24381 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
24382 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
24384 /* Floating point variants -- don't add any more to this list either. */
24385 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
24386 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
24387 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
24388 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
24389 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
24391 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
24394 struct arm_cpu_option_table
24398 const arm_feature_set value
;
24399 /* For some CPUs we assume an FPU unless the user explicitly sets
24401 const arm_feature_set default_fpu
;
24402 /* The canonical name of the CPU, or NULL to use NAME converted to upper
24404 const char *canonical_name
;
24407 /* This list should, at a minimum, contain all the cpu names
24408 recognized by GCC. */
24409 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
24410 static const struct arm_cpu_option_table arm_cpus
[] =
24412 ARM_CPU_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
, NULL
),
24413 ARM_CPU_OPT ("arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
),
24414 ARM_CPU_OPT ("arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
),
24415 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
24416 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
24417 ARM_CPU_OPT ("arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24418 ARM_CPU_OPT ("arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24419 ARM_CPU_OPT ("arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24420 ARM_CPU_OPT ("arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24421 ARM_CPU_OPT ("arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24422 ARM_CPU_OPT ("arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24423 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24424 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24425 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24426 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24427 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24428 ARM_CPU_OPT ("arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24429 ARM_CPU_OPT ("arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24430 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24431 ARM_CPU_OPT ("arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24432 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24433 ARM_CPU_OPT ("arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24434 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24435 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24436 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24437 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24438 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24439 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24440 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24441 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24442 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24443 ARM_CPU_OPT ("arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24444 ARM_CPU_OPT ("arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24445 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24446 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24447 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24448 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24449 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24450 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24451 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"),
24452 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24453 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24454 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24455 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24456 ARM_CPU_OPT ("fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24457 ARM_CPU_OPT ("fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24458 /* For V5 or later processors we default to using VFP; but the user
24459 should really set the FPU type explicitly. */
24460 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24461 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24462 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
24463 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
24464 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
24465 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24466 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"),
24467 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24468 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24469 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"),
24470 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24471 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24472 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24473 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24474 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24475 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"),
24476 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24477 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24478 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24479 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
,
24481 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
24482 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24483 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24484 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24485 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24486 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24487 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"),
24488 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
),
24489 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
,
24491 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
),
24492 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, "MPCore"),
24493 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, "MPCore"),
24494 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
),
24495 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
),
24496 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6ZK
, FPU_NONE
, NULL
),
24497 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6ZK
, FPU_ARCH_VFP_V2
, NULL
),
24498 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC
,
24499 FPU_NONE
, "Cortex-A5"),
24500 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24502 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC
,
24503 ARM_FEATURE_COPROC (FPU_VFP_V3
24504 | FPU_NEON_EXT_V1
),
24506 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC
,
24507 ARM_FEATURE_COPROC (FPU_VFP_V3
24508 | FPU_NEON_EXT_V1
),
24510 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24512 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24514 ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24516 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24518 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24520 ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24522 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, "Cortex-R4"),
24523 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R
, FPU_ARCH_VFP_V3D16
,
24525 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV
,
24526 FPU_NONE
, "Cortex-R5"),
24527 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV
,
24528 FPU_ARCH_VFP_V3D16
,
24530 ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M7"),
24531 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M4"),
24532 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, "Cortex-M3"),
24533 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M1"),
24534 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0"),
24535 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0+"),
24536 ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24539 /* ??? XSCALE is really an architecture. */
24540 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
24541 /* ??? iwmmxt is not a processor. */
24542 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
),
24543 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
),
24544 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
24546 ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
24547 FPU_ARCH_MAVERICK
, "ARM920T"),
24548 /* Marvell processors. */
24549 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE_LOW (ARM_AEXT_V7A
| ARM_EXT_MP
24551 FPU_ARCH_VFP_V3D16
, NULL
),
24552 ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE_LOW (ARM_AEXT_V7A
| ARM_EXT_MP
24554 FPU_ARCH_NEON_VFP_V4
, NULL
),
24555 /* APM X-Gene family. */
24556 ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24558 ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24561 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
24565 struct arm_arch_option_table
24569 const arm_feature_set value
;
24570 const arm_feature_set default_fpu
;
24573 /* This list should, at a minimum, contain all the architecture names
24574 recognized by GCC. */
24575 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
24576 static const struct arm_arch_option_table arm_archs
[] =
24578 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
24579 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
24580 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
24581 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
24582 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
24583 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
24584 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
24585 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
24586 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
24587 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
24588 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
24589 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
24590 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
24591 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
24592 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
),
24593 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
),
24594 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
),
24595 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
),
24596 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
),
24597 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
),
24598 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
),
24599 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6ZK
, FPU_ARCH_VFP
),
24600 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
),
24601 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
),
24602 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
),
24603 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6ZKT2
, FPU_ARCH_VFP
),
24604 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
24605 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
24606 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
),
24607 /* The official spelling of the ARMv7 profile variants is the dashed form.
24608 Accept the non-dashed form for compatibility with old toolchains. */
24609 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
24610 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
),
24611 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
24612 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
24613 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
24614 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
24615 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
24616 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
),
24617 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
),
24618 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
24619 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
24620 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
),
24621 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
24623 #undef ARM_ARCH_OPT
24625 /* ISA extensions in the co-processor and main instruction set space. */
24626 struct arm_option_extension_value_table
24630 const arm_feature_set merge_value
;
24631 const arm_feature_set clear_value
;
24632 const arm_feature_set allowed_archs
;
24635 /* The following table must be in alphabetical order with a NULL last entry.
24637 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
24638 static const struct arm_option_extension_value_table arm_extensions
[] =
24640 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
24641 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
24642 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24643 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
24644 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
24645 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
24646 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
24647 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
24648 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
24649 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
24650 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
24651 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ANY
),
24652 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
24653 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ANY
),
24654 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
24655 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ANY
),
24656 ARM_EXT_OPT ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
24657 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
24658 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
24659 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
24660 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
24661 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
24662 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
24663 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
24664 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
24665 ARM_EXT_OPT ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
24666 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
24667 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V7A
)),
24668 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
24670 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
24671 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
24672 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
24673 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ANY
),
24674 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
24678 /* ISA floating-point and Advanced SIMD extensions. */
24679 struct arm_option_fpu_value_table
24682 const arm_feature_set value
;
24685 /* This list should, at a minimum, contain all the fpu names
24686 recognized by GCC. */
24687 static const struct arm_option_fpu_value_table arm_fpus
[] =
24689 {"softfpa", FPU_NONE
},
24690 {"fpe", FPU_ARCH_FPE
},
24691 {"fpe2", FPU_ARCH_FPE
},
24692 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
24693 {"fpa", FPU_ARCH_FPA
},
24694 {"fpa10", FPU_ARCH_FPA
},
24695 {"fpa11", FPU_ARCH_FPA
},
24696 {"arm7500fe", FPU_ARCH_FPA
},
24697 {"softvfp", FPU_ARCH_VFP
},
24698 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
24699 {"vfp", FPU_ARCH_VFP_V2
},
24700 {"vfp9", FPU_ARCH_VFP_V2
},
24701 {"vfp3", FPU_ARCH_VFP_V3
}, /* For backwards compatbility. */
24702 {"vfp10", FPU_ARCH_VFP_V2
},
24703 {"vfp10-r0", FPU_ARCH_VFP_V1
},
24704 {"vfpxd", FPU_ARCH_VFP_V1xD
},
24705 {"vfpv2", FPU_ARCH_VFP_V2
},
24706 {"vfpv3", FPU_ARCH_VFP_V3
},
24707 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
24708 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
24709 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
24710 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
24711 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
24712 {"arm1020t", FPU_ARCH_VFP_V1
},
24713 {"arm1020e", FPU_ARCH_VFP_V2
},
24714 {"arm1136jfs", FPU_ARCH_VFP_V2
},
24715 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
24716 {"maverick", FPU_ARCH_MAVERICK
},
24717 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
24718 {"neon-fp16", FPU_ARCH_NEON_FP16
},
24719 {"vfpv4", FPU_ARCH_VFP_V4
},
24720 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
24721 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
24722 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
24723 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
24724 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
24725 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
24726 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
24727 {"crypto-neon-fp-armv8",
24728 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
24729 {NULL
, ARM_ARCH_NONE
}
24732 struct arm_option_value_table
24738 static const struct arm_option_value_table arm_float_abis
[] =
24740 {"hard", ARM_FLOAT_ABI_HARD
},
24741 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
24742 {"soft", ARM_FLOAT_ABI_SOFT
},
24747 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
24748 static const struct arm_option_value_table arm_eabis
[] =
24750 {"gnu", EF_ARM_EABI_UNKNOWN
},
24751 {"4", EF_ARM_EABI_VER4
},
24752 {"5", EF_ARM_EABI_VER5
},
/* Long ("-m...") option descriptor: matched by prefix, decoded by FUNC.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
24766 arm_parse_extension (char *str
, const arm_feature_set
**opt_p
)
24768 arm_feature_set
*ext_set
= (arm_feature_set
*)
24769 xmalloc (sizeof (arm_feature_set
));
24771 /* We insist on extensions being specified in alphabetical order, and with
24772 extensions being added before being removed. We achieve this by having
24773 the global ARM_EXTENSIONS table in alphabetical order, and using the
24774 ADDING_VALUE variable to indicate whether we are adding an extension (1)
24775 or removing it (0) and only allowing it to change in the order
24777 const struct arm_option_extension_value_table
* opt
= NULL
;
24778 int adding_value
= -1;
24780 /* Copy the feature set, so that we can modify it. */
24781 *ext_set
= **opt_p
;
24784 while (str
!= NULL
&& *str
!= 0)
24791 as_bad (_("invalid architectural extension"));
24796 ext
= strchr (str
, '+');
24801 len
= strlen (str
);
24803 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
24805 if (adding_value
!= 0)
24808 opt
= arm_extensions
;
24816 if (adding_value
== -1)
24819 opt
= arm_extensions
;
24821 else if (adding_value
!= 1)
24823 as_bad (_("must specify extensions to add before specifying "
24824 "those to remove"));
24831 as_bad (_("missing architectural extension"));
24835 gas_assert (adding_value
!= -1);
24836 gas_assert (opt
!= NULL
);
24838 /* Scan over the options table trying to find an exact match. */
24839 for (; opt
->name
!= NULL
; opt
++)
24840 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
24842 /* Check we can apply the extension to this architecture. */
24843 if (!ARM_CPU_HAS_FEATURE (*ext_set
, opt
->allowed_archs
))
24845 as_bad (_("extension does not apply to the base architecture"));
24849 /* Add or remove the extension. */
24851 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
24853 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
24858 if (opt
->name
== NULL
)
24860 /* Did we fail to find an extension because it wasn't specified in
24861 alphabetical order, or because it does not exist? */
24863 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
24864 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
24867 if (opt
->name
== NULL
)
24868 as_bad (_("unknown architectural extension `%s'"), str
);
24870 as_bad (_("architectural extensions must be specified in "
24871 "alphabetical order"));
24877 /* We should skip the extension we've just matched the next time
24889 arm_parse_cpu (char *str
)
24891 const struct arm_cpu_option_table
*opt
;
24892 char *ext
= strchr (str
, '+');
24898 len
= strlen (str
);
24902 as_bad (_("missing cpu name `%s'"), str
);
24906 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
24907 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
24909 mcpu_cpu_opt
= &opt
->value
;
24910 mcpu_fpu_opt
= &opt
->default_fpu
;
24911 if (opt
->canonical_name
)
24912 strcpy (selected_cpu_name
, opt
->canonical_name
);
24917 for (i
= 0; i
< len
; i
++)
24918 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
24919 selected_cpu_name
[i
] = 0;
24923 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
24928 as_bad (_("unknown cpu `%s'"), str
);
24933 arm_parse_arch (char *str
)
24935 const struct arm_arch_option_table
*opt
;
24936 char *ext
= strchr (str
, '+');
24942 len
= strlen (str
);
24946 as_bad (_("missing architecture name `%s'"), str
);
24950 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
24951 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
24953 march_cpu_opt
= &opt
->value
;
24954 march_fpu_opt
= &opt
->default_fpu
;
24955 strcpy (selected_cpu_name
, opt
->name
);
24958 return arm_parse_extension (ext
, &march_cpu_opt
);
24963 as_bad (_("unknown architecture `%s'\n"), str
);
24968 arm_parse_fpu (char * str
)
24970 const struct arm_option_fpu_value_table
* opt
;
24972 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
24973 if (streq (opt
->name
, str
))
24975 mfpu_opt
= &opt
->value
;
24979 as_bad (_("unknown floating point format `%s'\n"), str
);
24984 arm_parse_float_abi (char * str
)
24986 const struct arm_option_value_table
* opt
;
24988 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
24989 if (streq (opt
->name
, str
))
24991 mfloat_abi_opt
= opt
->value
;
24995 as_bad (_("unknown floating point abi `%s'\n"), str
);
25001 arm_parse_eabi (char * str
)
25003 const struct arm_option_value_table
*opt
;
25005 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
25006 if (streq (opt
->name
, str
))
25008 meabi_flags
= opt
->value
;
25011 as_bad (_("unknown EABI `%s'\n"), str
);
25017 arm_parse_it_mode (char * str
)
25019 bfd_boolean ret
= TRUE
;
25021 if (streq ("arm", str
))
25022 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
25023 else if (streq ("thumb", str
))
25024 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
25025 else if (streq ("always", str
))
25026 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
25027 else if (streq ("never", str
))
25028 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
25031 as_bad (_("unknown implicit IT mode `%s', should be "\
25032 "arm, thumb, always, or never."), str
);
25040 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED
)
25042 codecomposer_syntax
= TRUE
;
25043 arm_comment_chars
[0] = ';';
25044 arm_line_separator_chars
[0] = 0;
25048 struct arm_long_option_table arm_long_opts
[] =
25050 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
25051 arm_parse_cpu
, NULL
},
25052 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
25053 arm_parse_arch
, NULL
},
25054 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
25055 arm_parse_fpu
, NULL
},
25056 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
25057 arm_parse_float_abi
, NULL
},
25059 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
25060 arm_parse_eabi
, NULL
},
25062 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
25063 arm_parse_it_mode
, NULL
},
25064 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
25065 arm_ccs_mode
, NULL
},
25066 {NULL
, NULL
, 0, NULL
}
25070 md_parse_option (int c
, char * arg
)
25072 struct arm_option_table
*opt
;
25073 const struct arm_legacy_option_table
*fopt
;
25074 struct arm_long_option_table
*lopt
;
25080 target_big_endian
= 1;
25086 target_big_endian
= 0;
25090 case OPTION_FIX_V4BX
:
25095 /* Listing option. Just ignore these, we don't support additional
25100 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25102 if (c
== opt
->option
[0]
25103 && ((arg
== NULL
&& opt
->option
[1] == 0)
25104 || streq (arg
, opt
->option
+ 1)))
25106 /* If the option is deprecated, tell the user. */
25107 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
25108 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25109 arg
? arg
: "", _(opt
->deprecated
));
25111 if (opt
->var
!= NULL
)
25112 *opt
->var
= opt
->value
;
25118 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
25120 if (c
== fopt
->option
[0]
25121 && ((arg
== NULL
&& fopt
->option
[1] == 0)
25122 || streq (arg
, fopt
->option
+ 1)))
25124 /* If the option is deprecated, tell the user. */
25125 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
25126 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25127 arg
? arg
: "", _(fopt
->deprecated
));
25129 if (fopt
->var
!= NULL
)
25130 *fopt
->var
= &fopt
->value
;
25136 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25138 /* These options are expected to have an argument. */
25139 if (c
== lopt
->option
[0]
25141 && strncmp (arg
, lopt
->option
+ 1,
25142 strlen (lopt
->option
+ 1)) == 0)
25144 /* If the option is deprecated, tell the user. */
25145 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
25146 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
25147 _(lopt
->deprecated
));
25149 /* Call the sup-option parser. */
25150 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
25161 md_show_usage (FILE * fp
)
25163 struct arm_option_table
*opt
;
25164 struct arm_long_option_table
*lopt
;
25166 fprintf (fp
, _(" ARM-specific assembler options:\n"));
25168 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25169 if (opt
->help
!= NULL
)
25170 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
25172 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25173 if (lopt
->help
!= NULL
)
25174 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
25178 -EB assemble code for a big-endian cpu\n"));
25183 -EL assemble code for a little-endian cpu\n"));
25187 --fix-v4bx Allow BX in ARMv4 code\n"));
25195 arm_feature_set flags
;
25196 } cpu_arch_ver_table
;
25198 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
25199 least features first. */
25200 static const cpu_arch_ver_table cpu_arch_ver
[] =
25206 {4, ARM_ARCH_V5TE
},
25207 {5, ARM_ARCH_V5TEJ
},
25211 {11, ARM_ARCH_V6M
},
25212 {12, ARM_ARCH_V6SM
},
25213 {8, ARM_ARCH_V6T2
},
25214 {10, ARM_ARCH_V7VE
},
25215 {10, ARM_ARCH_V7R
},
25216 {10, ARM_ARCH_V7M
},
25217 {14, ARM_ARCH_V8A
},
25221 /* Set an attribute if it has not already been set by the user. */
25223 aeabi_set_attribute_int (int tag
, int value
)
25226 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25227 || !attributes_set_explicitly
[tag
])
25228 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
25232 aeabi_set_attribute_string (int tag
, const char *value
)
25235 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25236 || !attributes_set_explicitly
[tag
])
25237 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
25240 /* Set the public EABI object attributes. */
25242 aeabi_set_public_attributes (void)
25247 int fp16_optional
= 0;
25248 arm_feature_set flags
;
25249 arm_feature_set tmp
;
25250 const cpu_arch_ver_table
*p
;
25252 /* Choose the architecture based on the capabilities of the requested cpu
25253 (if any) and/or the instructions actually used. */
25254 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
25255 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
25256 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
25258 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
25259 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
25261 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
25262 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
25264 selected_cpu
= flags
;
25266 /* Allow the user to override the reported architecture. */
25269 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
25270 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
25273 /* We need to make sure that the attributes do not identify us as v6S-M
25274 when the only v6S-M feature in use is the Operating System Extensions. */
25275 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_os
))
25276 if (!ARM_CPU_HAS_FEATURE (flags
, arm_arch_v6m_only
))
25277 ARM_CLEAR_FEATURE (flags
, flags
, arm_ext_os
);
25281 for (p
= cpu_arch_ver
; p
->val
; p
++)
25283 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
25286 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
25290 /* The table lookup above finds the last architecture to contribute
25291 a new feature. Unfortunately, Tag13 is a subset of the union of
25292 v6T2 and v7-M, so it is never seen as contributing a new feature.
25293 We can not search for the last entry which is entirely used,
25294 because if no CPU is specified we build up only those flags
25295 actually used. Perhaps we should separate out the specified
25296 and implicit cases. Avoid taking this path for -march=all by
25297 checking for contradictory v7-A / v7-M features. */
25299 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
25300 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
25301 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
25304 /* Tag_CPU_name. */
25305 if (selected_cpu_name
[0])
25309 q
= selected_cpu_name
;
25310 if (strncmp (q
, "armv", 4) == 0)
25315 for (i
= 0; q
[i
]; i
++)
25316 q
[i
] = TOUPPER (q
[i
]);
25318 aeabi_set_attribute_string (Tag_CPU_name
, q
);
25321 /* Tag_CPU_arch. */
25322 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
25324 /* Tag_CPU_arch_profile. */
25325 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
))
25327 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
25329 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
25334 if (profile
!= '\0')
25335 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
25337 /* Tag_ARM_ISA_use. */
25338 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
25340 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
25342 /* Tag_THUMB_ISA_use. */
25343 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
25345 aeabi_set_attribute_int (Tag_THUMB_ISA_use
,
25346 ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
) ? 2 : 1);
25348 /* Tag_VFP_arch. */
25349 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
25350 aeabi_set_attribute_int (Tag_VFP_arch
,
25351 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
25353 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
25354 aeabi_set_attribute_int (Tag_VFP_arch
,
25355 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
25357 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
25360 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
25362 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
25364 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
25367 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
25368 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
25369 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
25370 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
25371 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
25373 /* Tag_ABI_HardFP_use. */
25374 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
25375 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
25376 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
25378 /* Tag_WMMX_arch. */
25379 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
25380 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
25381 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
25382 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
25384 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
25385 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
25386 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
25387 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
25389 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
25391 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
25395 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
25400 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
25401 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
25402 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
25406 We set Tag_DIV_use to two when integer divide instructions have been used
25407 in ARM state, or when Thumb integer divide instructions have been used,
25408 but we have no architecture profile set, nor have we any ARM instructions.
25410 For ARMv8 we set the tag to 0 as integer divide is implied by the base
25413 For new architectures we will have to check these tests. */
25414 gas_assert (arch
<= TAG_CPU_ARCH_V8
);
25415 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
))
25416 aeabi_set_attribute_int (Tag_DIV_use
, 0);
25417 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
25418 || (profile
== '\0'
25419 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
25420 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
25421 aeabi_set_attribute_int (Tag_DIV_use
, 2);
25423 /* Tag_MP_extension_use. */
25424 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
25425 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
25427 /* Tag Virtualization_use. */
25428 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
25430 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
25433 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
25436 /* Add the default contents for the .ARM.attributes section. */
25440 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
25443 aeabi_set_public_attributes ();
25445 #endif /* OBJ_ELF */
25448 /* Parse a .cpu directive. */
25451 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
25453 const struct arm_cpu_option_table
*opt
;
25457 name
= input_line_pointer
;
25458 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25459 input_line_pointer
++;
25460 saved_char
= *input_line_pointer
;
25461 *input_line_pointer
= 0;
25463 /* Skip the first "all" entry. */
25464 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
25465 if (streq (opt
->name
, name
))
25467 mcpu_cpu_opt
= &opt
->value
;
25468 selected_cpu
= opt
->value
;
25469 if (opt
->canonical_name
)
25470 strcpy (selected_cpu_name
, opt
->canonical_name
);
25474 for (i
= 0; opt
->name
[i
]; i
++)
25475 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25477 selected_cpu_name
[i
] = 0;
25479 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25480 *input_line_pointer
= saved_char
;
25481 demand_empty_rest_of_line ();
25484 as_bad (_("unknown cpu `%s'"), name
);
25485 *input_line_pointer
= saved_char
;
25486 ignore_rest_of_line ();
25490 /* Parse a .arch directive. */
25493 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
25495 const struct arm_arch_option_table
*opt
;
25499 name
= input_line_pointer
;
25500 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25501 input_line_pointer
++;
25502 saved_char
= *input_line_pointer
;
25503 *input_line_pointer
= 0;
25505 /* Skip the first "all" entry. */
25506 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
25507 if (streq (opt
->name
, name
))
25509 mcpu_cpu_opt
= &opt
->value
;
25510 selected_cpu
= opt
->value
;
25511 strcpy (selected_cpu_name
, opt
->name
);
25512 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25513 *input_line_pointer
= saved_char
;
25514 demand_empty_rest_of_line ();
25518 as_bad (_("unknown architecture `%s'\n"), name
);
25519 *input_line_pointer
= saved_char
;
25520 ignore_rest_of_line ();
25524 /* Parse a .object_arch directive. */
25527 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
25529 const struct arm_arch_option_table
*opt
;
25533 name
= input_line_pointer
;
25534 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25535 input_line_pointer
++;
25536 saved_char
= *input_line_pointer
;
25537 *input_line_pointer
= 0;
25539 /* Skip the first "all" entry. */
25540 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
25541 if (streq (opt
->name
, name
))
25543 object_arch
= &opt
->value
;
25544 *input_line_pointer
= saved_char
;
25545 demand_empty_rest_of_line ();
25549 as_bad (_("unknown architecture `%s'\n"), name
);
25550 *input_line_pointer
= saved_char
;
25551 ignore_rest_of_line ();
25554 /* Parse a .arch_extension directive. */
25557 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
25559 const struct arm_option_extension_value_table
*opt
;
25562 int adding_value
= 1;
25564 name
= input_line_pointer
;
25565 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25566 input_line_pointer
++;
25567 saved_char
= *input_line_pointer
;
25568 *input_line_pointer
= 0;
25570 if (strlen (name
) >= 2
25571 && strncmp (name
, "no", 2) == 0)
25577 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
25578 if (streq (opt
->name
, name
))
25580 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt
, opt
->allowed_archs
))
25582 as_bad (_("architectural extension `%s' is not allowed for the "
25583 "current base architecture"), name
);
25588 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_cpu
,
25591 ARM_CLEAR_FEATURE (selected_cpu
, selected_cpu
, opt
->clear_value
);
25593 mcpu_cpu_opt
= &selected_cpu
;
25594 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25595 *input_line_pointer
= saved_char
;
25596 demand_empty_rest_of_line ();
25600 if (opt
->name
== NULL
)
25601 as_bad (_("unknown architecture extension `%s'\n"), name
);
25603 *input_line_pointer
= saved_char
;
25604 ignore_rest_of_line ();
25607 /* Parse a .fpu directive. */
25610 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
25612 const struct arm_option_fpu_value_table
*opt
;
25616 name
= input_line_pointer
;
25617 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25618 input_line_pointer
++;
25619 saved_char
= *input_line_pointer
;
25620 *input_line_pointer
= 0;
25622 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
25623 if (streq (opt
->name
, name
))
25625 mfpu_opt
= &opt
->value
;
25626 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25627 *input_line_pointer
= saved_char
;
25628 demand_empty_rest_of_line ();
25632 as_bad (_("unknown floating point format `%s'\n"), name
);
25633 *input_line_pointer
= saved_char
;
25634 ignore_rest_of_line ();
25637 /* Copy symbol information. */
25640 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
25642 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
25646 /* Given a symbolic attribute NAME, return the proper integer value.
25647 Returns -1 if the attribute is not known. */
25650 arm_convert_symbolic_attribute (const char *name
)
25652 static const struct
25657 attribute_table
[] =
25659 /* When you modify this table you should
25660 also modify the list in doc/c-arm.texi. */
25661 #define T(tag) {#tag, tag}
25662 T (Tag_CPU_raw_name
),
25665 T (Tag_CPU_arch_profile
),
25666 T (Tag_ARM_ISA_use
),
25667 T (Tag_THUMB_ISA_use
),
25671 T (Tag_Advanced_SIMD_arch
),
25672 T (Tag_PCS_config
),
25673 T (Tag_ABI_PCS_R9_use
),
25674 T (Tag_ABI_PCS_RW_data
),
25675 T (Tag_ABI_PCS_RO_data
),
25676 T (Tag_ABI_PCS_GOT_use
),
25677 T (Tag_ABI_PCS_wchar_t
),
25678 T (Tag_ABI_FP_rounding
),
25679 T (Tag_ABI_FP_denormal
),
25680 T (Tag_ABI_FP_exceptions
),
25681 T (Tag_ABI_FP_user_exceptions
),
25682 T (Tag_ABI_FP_number_model
),
25683 T (Tag_ABI_align_needed
),
25684 T (Tag_ABI_align8_needed
),
25685 T (Tag_ABI_align_preserved
),
25686 T (Tag_ABI_align8_preserved
),
25687 T (Tag_ABI_enum_size
),
25688 T (Tag_ABI_HardFP_use
),
25689 T (Tag_ABI_VFP_args
),
25690 T (Tag_ABI_WMMX_args
),
25691 T (Tag_ABI_optimization_goals
),
25692 T (Tag_ABI_FP_optimization_goals
),
25693 T (Tag_compatibility
),
25694 T (Tag_CPU_unaligned_access
),
25695 T (Tag_FP_HP_extension
),
25696 T (Tag_VFP_HP_extension
),
25697 T (Tag_ABI_FP_16bit_format
),
25698 T (Tag_MPextension_use
),
25700 T (Tag_nodefaults
),
25701 T (Tag_also_compatible_with
),
25702 T (Tag_conformance
),
25704 T (Tag_Virtualization_use
),
25705 /* We deliberately do not include Tag_MPextension_use_legacy. */
25713 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
25714 if (streq (name
, attribute_table
[i
].name
))
25715 return attribute_table
[i
].tag
;
25721 /* Apply sym value for relocations only in the case that they are for
25722 local symbols in the same segment as the fixup and you have the
25723 respective architectural feature for blx and simple switches. */
25725 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
25728 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25729 /* PR 17444: If the local symbol is in a different section then a reloc
25730 will always be generated for it, so applying the symbol value now
25731 will result in a double offset being stored in the relocation. */
25732 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
25733 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
25735 switch (fixP
->fx_r_type
)
25737 case BFD_RELOC_ARM_PCREL_BLX
:
25738 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25739 if (ARM_IS_FUNC (fixP
->fx_addsy
))
25743 case BFD_RELOC_ARM_PCREL_CALL
:
25744 case BFD_RELOC_THUMB_PCREL_BLX
:
25745 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
25756 #endif /* OBJ_ELF */