1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
32 #include "safe-ctype.h"
36 #include "opcode/arm.h"
40 #include "dw2gencfi.h"
43 #include "dwarf2dbg.h"
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
49 /* This structure holds the unwinding state. */
54 symbolS
* table_entry
;
55 symbolS
* personality_routine
;
56 int personality_index
;
57 /* The segment containing the function. */
60 /* Opcodes generated from this function. */
61 unsigned char * opcodes
;
64 /* The number of bytes pushed to the stack. */
66 /* We don't add stack adjustment opcodes immediately so that we can merge
67 multiple adjustments. We can also omit the final adjustment
68 when using a frame pointer. */
69 offsetT pending_offset
;
70 /* These two fields are set by both unwind_movsp and unwind_setfp. They
71 hold the reg+offset to use when restoring sp from a frame pointer. */
74 /* Nonzero if an unwind_setfp directive has been seen. */
76 /* Nonzero if the last opcode restores sp from fp_reg. */
77 unsigned sp_restored
:1;
82 /* Results from operand parsing worker functions. */
86 PARSE_OPERAND_SUCCESS
,
88 PARSE_OPERAND_FAIL_NO_BACKTRACK
89 } parse_operand_result
;
98 /* Types of processor to assemble for. */
100 /* The code that was here used to select a default CPU depending on compiler
101 pre-defines which were only present when doing native builds, thus
102 changing gas' default behaviour depending upon the build host.
104 If you have a target that requires a default CPU option then the you
105 should define CPU_DEFAULT here. */
110 # define FPU_DEFAULT FPU_ARCH_FPA
111 # elif defined (TE_NetBSD)
113 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 /* Legacy a.out format. */
116 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # elif defined (TE_VXWORKS)
119 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 /* For backwards compatibility, default to FPA. */
122 # define FPU_DEFAULT FPU_ARCH_FPA
124 #endif /* ifndef FPU_DEFAULT */
126 #define streq(a, b) (strcmp (a, b) == 0)
128 static arm_feature_set cpu_variant
;
129 static arm_feature_set arm_arch_used
;
130 static arm_feature_set thumb_arch_used
;
132 /* Flags stored in private area of BFD structure. */
133 static int uses_apcs_26
= FALSE
;
134 static int atpcs
= FALSE
;
135 static int support_interwork
= FALSE
;
136 static int uses_apcs_float
= FALSE
;
137 static int pic_code
= FALSE
;
138 static int fix_v4bx
= FALSE
;
139 /* Warn on using deprecated features. */
140 static int warn_on_deprecated
= TRUE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
154 static const arm_feature_set
*object_arch
= NULL
;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
158 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
159 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
160 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
161 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
162 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
163 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
164 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
165 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
168 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
171 static const arm_feature_set arm_ext_v1
= ARM_FEATURE (ARM_EXT_V1
, 0);
172 static const arm_feature_set arm_ext_v2
= ARM_FEATURE (ARM_EXT_V1
, 0);
173 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE (ARM_EXT_V2S
, 0);
174 static const arm_feature_set arm_ext_v3
= ARM_FEATURE (ARM_EXT_V3
, 0);
175 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE (ARM_EXT_V3M
, 0);
176 static const arm_feature_set arm_ext_v4
= ARM_FEATURE (ARM_EXT_V4
, 0);
177 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE (ARM_EXT_V4T
, 0);
178 static const arm_feature_set arm_ext_v5
= ARM_FEATURE (ARM_EXT_V5
, 0);
179 static const arm_feature_set arm_ext_v4t_5
=
180 ARM_FEATURE (ARM_EXT_V4T
| ARM_EXT_V5
, 0);
181 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE (ARM_EXT_V5T
, 0);
182 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE (ARM_EXT_V5E
, 0);
183 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE (ARM_EXT_V5ExP
, 0);
184 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE (ARM_EXT_V5J
, 0);
185 static const arm_feature_set arm_ext_v6
= ARM_FEATURE (ARM_EXT_V6
, 0);
186 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE (ARM_EXT_V6K
, 0);
187 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE (ARM_EXT_V6T2
, 0);
188 static const arm_feature_set arm_ext_v6m
= ARM_FEATURE (ARM_EXT_V6M
, 0);
189 static const arm_feature_set arm_ext_v6_notm
= ARM_FEATURE (ARM_EXT_V6_NOTM
, 0);
190 static const arm_feature_set arm_ext_v6_dsp
= ARM_FEATURE (ARM_EXT_V6_DSP
, 0);
191 static const arm_feature_set arm_ext_barrier
= ARM_FEATURE (ARM_EXT_BARRIER
, 0);
192 static const arm_feature_set arm_ext_msr
= ARM_FEATURE (ARM_EXT_THUMB_MSR
, 0);
193 static const arm_feature_set arm_ext_div
= ARM_FEATURE (ARM_EXT_DIV
, 0);
194 static const arm_feature_set arm_ext_v7
= ARM_FEATURE (ARM_EXT_V7
, 0);
195 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE (ARM_EXT_V7A
, 0);
196 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE (ARM_EXT_V7R
, 0);
197 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE (ARM_EXT_V7M
, 0);
198 static const arm_feature_set arm_ext_m
=
199 ARM_FEATURE (ARM_EXT_V6M
| ARM_EXT_OS
| ARM_EXT_V7M
, 0);
200 static const arm_feature_set arm_ext_mp
= ARM_FEATURE (ARM_EXT_MP
, 0);
201 static const arm_feature_set arm_ext_sec
= ARM_FEATURE (ARM_EXT_SEC
, 0);
202 static const arm_feature_set arm_ext_os
= ARM_FEATURE (ARM_EXT_OS
, 0);
203 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE (ARM_EXT_ADIV
, 0);
205 static const arm_feature_set arm_arch_any
= ARM_ANY
;
206 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1);
207 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
208 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
210 static const arm_feature_set arm_cext_iwmmxt2
=
211 ARM_FEATURE (0, ARM_CEXT_IWMMXT2
);
212 static const arm_feature_set arm_cext_iwmmxt
=
213 ARM_FEATURE (0, ARM_CEXT_IWMMXT
);
214 static const arm_feature_set arm_cext_xscale
=
215 ARM_FEATURE (0, ARM_CEXT_XSCALE
);
216 static const arm_feature_set arm_cext_maverick
=
217 ARM_FEATURE (0, ARM_CEXT_MAVERICK
);
218 static const arm_feature_set fpu_fpa_ext_v1
= ARM_FEATURE (0, FPU_FPA_EXT_V1
);
219 static const arm_feature_set fpu_fpa_ext_v2
= ARM_FEATURE (0, FPU_FPA_EXT_V2
);
220 static const arm_feature_set fpu_vfp_ext_v1xd
=
221 ARM_FEATURE (0, FPU_VFP_EXT_V1xD
);
222 static const arm_feature_set fpu_vfp_ext_v1
= ARM_FEATURE (0, FPU_VFP_EXT_V1
);
223 static const arm_feature_set fpu_vfp_ext_v2
= ARM_FEATURE (0, FPU_VFP_EXT_V2
);
224 static const arm_feature_set fpu_vfp_ext_v3xd
= ARM_FEATURE (0, FPU_VFP_EXT_V3xD
);
225 static const arm_feature_set fpu_vfp_ext_v3
= ARM_FEATURE (0, FPU_VFP_EXT_V3
);
226 static const arm_feature_set fpu_vfp_ext_d32
=
227 ARM_FEATURE (0, FPU_VFP_EXT_D32
);
228 static const arm_feature_set fpu_neon_ext_v1
= ARM_FEATURE (0, FPU_NEON_EXT_V1
);
229 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
230 ARM_FEATURE (0, FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
231 static const arm_feature_set fpu_vfp_fp16
= ARM_FEATURE (0, FPU_VFP_EXT_FP16
);
232 static const arm_feature_set fpu_neon_ext_fma
= ARM_FEATURE (0, FPU_NEON_EXT_FMA
);
233 static const arm_feature_set fpu_vfp_ext_fma
= ARM_FEATURE (0, FPU_VFP_EXT_FMA
);
235 static int mfloat_abi_opt
= -1;
236 /* Record user cpu selection for object attributes. */
237 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
238 /* Must be long enough to hold any of the names in arm_cpus. */
239 static char selected_cpu_name
[16];
242 static int meabi_flags
= EABI_DEFAULT
;
244 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
247 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
252 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
257 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
258 symbolS
* GOT_symbol
;
261 /* 0: assemble for ARM,
262 1: assemble for Thumb,
263 2: assemble for Thumb even though target CPU does not support thumb
265 static int thumb_mode
= 0;
266 /* A value distinct from the possible values for thumb_mode that we
267 can use to record whether thumb_mode has been copied into the
268 tc_frag_data field of a frag. */
269 #define MODE_RECORDED (1 << 4)
271 /* Specifies the intrinsic IT insn behavior mode. */
272 enum implicit_it_mode
274 IMPLICIT_IT_MODE_NEVER
= 0x00,
275 IMPLICIT_IT_MODE_ARM
= 0x01,
276 IMPLICIT_IT_MODE_THUMB
= 0x02,
277 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
279 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
281 /* If unified_syntax is true, we are processing the new unified
282 ARM/Thumb syntax. Important differences from the old ARM mode:
284 - Immediate operands do not require a # prefix.
285 - Conditional affixes always appear at the end of the
286 instruction. (For backward compatibility, those instructions
287 that formerly had them in the middle, continue to accept them
289 - The IT instruction may appear, and if it does is validated
290 against subsequent conditional affixes. It does not generate
293 Important differences from the old Thumb mode:
295 - Immediate operands do not require a # prefix.
296 - Most of the V6T2 instructions are only available in unified mode.
297 - The .N and .W suffixes are recognized and honored (it is an error
298 if they cannot be honored).
299 - All instructions set the flags if and only if they have an 's' affix.
300 - Conditional affixes may be used. They are validated against
301 preceding IT instructions. Unlike ARM mode, you cannot use a
302 conditional affix except in the scope of an IT instruction. */
304 static bfd_boolean unified_syntax
= FALSE
;
319 enum neon_el_type type
;
323 #define NEON_MAX_TYPE_ELS 4
327 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
331 enum it_instruction_type
336 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
337 if inside, should be the last one. */
338 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
339 i.e. BKPT and NOP. */
340 IT_INSN
/* The IT insn has been parsed. */
346 unsigned long instruction
;
350 /* "uncond_value" is set to the value in place of the conditional field in
351 unconditional versions of the instruction, or -1 if nothing is
354 struct neon_type vectype
;
355 /* This does not indicate an actual NEON instruction, only that
356 the mnemonic accepts neon-style type suffixes. */
358 /* Set to the opcode if the instruction needs relaxation.
359 Zero if the instruction is not relaxed. */
363 bfd_reloc_code_real_type type
;
368 enum it_instruction_type it_insn_type
;
374 struct neon_type_el vectype
;
375 unsigned present
: 1; /* Operand present. */
376 unsigned isreg
: 1; /* Operand was a register. */
377 unsigned immisreg
: 1; /* .imm field is a second register. */
378 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
379 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
380 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
381 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
382 instructions. This allows us to disambiguate ARM <-> vector insns. */
383 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
384 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
385 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
386 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
387 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
388 unsigned writeback
: 1; /* Operand has trailing ! */
389 unsigned preind
: 1; /* Preindexed address. */
390 unsigned postind
: 1; /* Postindexed address. */
391 unsigned negative
: 1; /* Index register was negated. */
392 unsigned shifted
: 1; /* Shift applied to operation. */
393 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
397 static struct arm_it inst
;
399 #define NUM_FLOAT_VALS 8
401 const char * fp_const
[] =
403 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
406 /* Number of littlenums required to hold an extended precision number. */
407 #define MAX_LITTLENUMS 6
409 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
419 #define CP_T_X 0x00008000
420 #define CP_T_Y 0x00400000
422 #define CONDS_BIT 0x00100000
423 #define LOAD_BIT 0x00100000
425 #define DOUBLE_LOAD_FLAG 0x00000001
429 const char * template_name
;
433 #define COND_ALWAYS 0xE
437 const char * template_name
;
441 struct asm_barrier_opt
443 const char * template_name
;
447 /* The bit that distinguishes CPSR and SPSR. */
448 #define SPSR_BIT (1 << 22)
450 /* The individual PSR flag bits. */
451 #define PSR_c (1 << 16)
452 #define PSR_x (1 << 17)
453 #define PSR_s (1 << 18)
454 #define PSR_f (1 << 19)
459 bfd_reloc_code_real_type reloc
;
464 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
465 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
470 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
473 /* Bits for DEFINED field in neon_typed_alias. */
474 #define NTA_HASTYPE 1
475 #define NTA_HASINDEX 2
477 struct neon_typed_alias
479 unsigned char defined
;
481 struct neon_type_el eltype
;
484 /* ARM register categories. This includes coprocessor numbers and various
485 architecture extensions' registers. */
511 /* Structure for a hash table entry for a register.
512 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
513 information which states whether a vector type or index is specified (for a
514 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
518 unsigned char number
;
520 unsigned char builtin
;
521 struct neon_typed_alias
* neon
;
524 /* Diagnostics used when we don't get a register of the expected type. */
525 const char * const reg_expected_msgs
[] =
527 N_("ARM register expected"),
528 N_("bad or missing co-processor number"),
529 N_("co-processor register expected"),
530 N_("FPA register expected"),
531 N_("VFP single precision register expected"),
532 N_("VFP/Neon double precision register expected"),
533 N_("Neon quad precision register expected"),
534 N_("VFP single or double precision register expected"),
535 N_("Neon double or quad precision register expected"),
536 N_("VFP single, double or Neon quad precision register expected"),
537 N_("VFP system register expected"),
538 N_("Maverick MVF register expected"),
539 N_("Maverick MVD register expected"),
540 N_("Maverick MVFX register expected"),
541 N_("Maverick MVDX register expected"),
542 N_("Maverick MVAX register expected"),
543 N_("Maverick DSPSC register expected"),
544 N_("iWMMXt data register expected"),
545 N_("iWMMXt control register expected"),
546 N_("iWMMXt scalar register expected"),
547 N_("XScale accumulator register expected"),
550 /* Some well known registers that we refer to directly elsewhere. */
555 /* ARM instructions take 4bytes in the object file, Thumb instructions
561 /* Basic string to match. */
562 const char * template_name
;
564 /* Parameters to instruction. */
565 unsigned int operands
[8];
567 /* Conditional tag - see opcode_lookup. */
568 unsigned int tag
: 4;
570 /* Basic instruction code. */
571 unsigned int avalue
: 28;
573 /* Thumb-format instruction code. */
576 /* Which architecture variant provides this instruction. */
577 const arm_feature_set
* avariant
;
578 const arm_feature_set
* tvariant
;
580 /* Function to call to encode instruction in ARM format. */
581 void (* aencode
) (void);
583 /* Function to call to encode instruction in Thumb format. */
584 void (* tencode
) (void);
587 /* Defines for various bits that we will want to toggle. */
588 #define INST_IMMEDIATE 0x02000000
589 #define OFFSET_REG 0x02000000
590 #define HWOFFSET_IMM 0x00400000
591 #define SHIFT_BY_REG 0x00000010
592 #define PRE_INDEX 0x01000000
593 #define INDEX_UP 0x00800000
594 #define WRITE_BACK 0x00200000
595 #define LDM_TYPE_2_OR_3 0x00400000
596 #define CPSI_MMOD 0x00020000
598 #define LITERAL_MASK 0xf000f000
599 #define OPCODE_MASK 0xfe1fffff
600 #define V4_STR_BIT 0x00000020
602 #define T2_SUBS_PC_LR 0xf3de8f00
604 #define DATA_OP_SHIFT 21
606 #define T2_OPCODE_MASK 0xfe1fffff
607 #define T2_DATA_OP_SHIFT 21
609 /* Codes to distinguish the arithmetic instructions. */
620 #define OPCODE_CMP 10
621 #define OPCODE_CMN 11
622 #define OPCODE_ORR 12
623 #define OPCODE_MOV 13
624 #define OPCODE_BIC 14
625 #define OPCODE_MVN 15
627 #define T2_OPCODE_AND 0
628 #define T2_OPCODE_BIC 1
629 #define T2_OPCODE_ORR 2
630 #define T2_OPCODE_ORN 3
631 #define T2_OPCODE_EOR 4
632 #define T2_OPCODE_ADD 8
633 #define T2_OPCODE_ADC 10
634 #define T2_OPCODE_SBC 11
635 #define T2_OPCODE_SUB 13
636 #define T2_OPCODE_RSB 14
638 #define T_OPCODE_MUL 0x4340
639 #define T_OPCODE_TST 0x4200
640 #define T_OPCODE_CMN 0x42c0
641 #define T_OPCODE_NEG 0x4240
642 #define T_OPCODE_MVN 0x43c0
644 #define T_OPCODE_ADD_R3 0x1800
645 #define T_OPCODE_SUB_R3 0x1a00
646 #define T_OPCODE_ADD_HI 0x4400
647 #define T_OPCODE_ADD_ST 0xb000
648 #define T_OPCODE_SUB_ST 0xb080
649 #define T_OPCODE_ADD_SP 0xa800
650 #define T_OPCODE_ADD_PC 0xa000
651 #define T_OPCODE_ADD_I8 0x3000
652 #define T_OPCODE_SUB_I8 0x3800
653 #define T_OPCODE_ADD_I3 0x1c00
654 #define T_OPCODE_SUB_I3 0x1e00
656 #define T_OPCODE_ASR_R 0x4100
657 #define T_OPCODE_LSL_R 0x4080
658 #define T_OPCODE_LSR_R 0x40c0
659 #define T_OPCODE_ROR_R 0x41c0
660 #define T_OPCODE_ASR_I 0x1000
661 #define T_OPCODE_LSL_I 0x0000
662 #define T_OPCODE_LSR_I 0x0800
664 #define T_OPCODE_MOV_I8 0x2000
665 #define T_OPCODE_CMP_I8 0x2800
666 #define T_OPCODE_CMP_LR 0x4280
667 #define T_OPCODE_MOV_HR 0x4600
668 #define T_OPCODE_CMP_HR 0x4500
670 #define T_OPCODE_LDR_PC 0x4800
671 #define T_OPCODE_LDR_SP 0x9800
672 #define T_OPCODE_STR_SP 0x9000
673 #define T_OPCODE_LDR_IW 0x6800
674 #define T_OPCODE_STR_IW 0x6000
675 #define T_OPCODE_LDR_IH 0x8800
676 #define T_OPCODE_STR_IH 0x8000
677 #define T_OPCODE_LDR_IB 0x7800
678 #define T_OPCODE_STR_IB 0x7000
679 #define T_OPCODE_LDR_RW 0x5800
680 #define T_OPCODE_STR_RW 0x5000
681 #define T_OPCODE_LDR_RH 0x5a00
682 #define T_OPCODE_STR_RH 0x5200
683 #define T_OPCODE_LDR_RB 0x5c00
684 #define T_OPCODE_STR_RB 0x5400
686 #define T_OPCODE_PUSH 0xb400
687 #define T_OPCODE_POP 0xbc00
689 #define T_OPCODE_BRANCH 0xe000
691 #define THUMB_SIZE 2 /* Size of thumb instruction. */
692 #define THUMB_PP_PC_LR 0x0100
693 #define THUMB_LOAD_BIT 0x0800
694 #define THUMB2_LOAD_BIT 0x00100000
696 #define BAD_ARGS _("bad arguments to instruction")
697 #define BAD_SP _("r13 not allowed here")
698 #define BAD_PC _("r15 not allowed here")
699 #define BAD_COND _("instruction cannot be conditional")
700 #define BAD_OVERLAP _("registers may not be the same")
701 #define BAD_HIREG _("lo register required")
702 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* No trailing semicolon: the macro is used in expression context
   (e.g. `inst.error = cond ? BAD_ADDR_MODE : ...`), where an embedded
   semicolon is a syntax error.  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
704 #define BAD_BRANCH _("branch must be last instruction in IT block")
705 #define BAD_NOT_IT _("instruction not allowed in IT block")
706 #define BAD_FPU _("selected FPU does not support instruction")
707 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
708 #define BAD_IT_COND _("incorrect condition in IT block")
709 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
710 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
711 #define BAD_PC_ADDRESSING \
712 _("cannot use register index with PC-relative addressing")
713 #define BAD_PC_WRITEBACK \
714 _("cannot use writeback with PC-relative addressing")
716 static struct hash_control
* arm_ops_hsh
;
717 static struct hash_control
* arm_cond_hsh
;
718 static struct hash_control
* arm_shift_hsh
;
719 static struct hash_control
* arm_psr_hsh
;
720 static struct hash_control
* arm_v7m_psr_hsh
;
721 static struct hash_control
* arm_reg_hsh
;
722 static struct hash_control
* arm_reloc_hsh
;
723 static struct hash_control
* arm_barrier_opt_hsh
;
725 /* Stuff needed to resolve the label ambiguity
734 symbolS
* last_label_seen
;
735 static int label_is_thumb_function_name
= FALSE
;
737 /* Literal pool structure. Held on a per-section
738 and per-sub-section basis. */
740 #define MAX_LITERAL_POOL_SIZE 1024
741 typedef struct literal_pool
743 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
744 unsigned int next_free_entry
;
749 struct literal_pool
* next
;
752 /* Pointer to a linked list of literal pools. */
753 literal_pool
* list_of_pools
= NULL
;
756 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
758 static struct current_it now_it
;
762 now_it_compatible (int cond
)
764 return (cond
& ~1) == (now_it
.cc
& ~1);
768 conditional_insn (void)
770 return inst
.cond
!= COND_ALWAYS
;
773 static int in_it_block (void);
775 static int handle_it_state (void);
777 static void force_automatic_it_block_close (void);
779 static void it_fsm_post_encode (void);
781 #define set_it_insn_type(type) \
784 inst.it_insn_type = type; \
785 if (handle_it_state () == FAIL) \
790 #define set_it_insn_type_nonvoid(type, failret) \
793 inst.it_insn_type = type; \
794 if (handle_it_state () == FAIL) \
799 #define set_it_insn_type_last() \
802 if (inst.cond == COND_ALWAYS) \
803 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
805 set_it_insn_type (INSIDE_IT_LAST_INSN); \
811 /* This array holds the chars that always start a comment. If the
812 pre-processor is disabled, these aren't very useful. */
813 const char comment_chars
[] = "@";
815 /* This array holds the chars that only start a comment at the beginning of
816 a line. If the line seems to have the form '# 123 filename'
817 .line and .file directives will appear in the pre-processed output. */
818 /* Note that input_file.c hand checks for '#' at the beginning of the
819 first line of the input file. This is because the compiler outputs
820 #NO_APP at the beginning of its output. */
821 /* Also note that comments like this one will always work. */
822 const char line_comment_chars
[] = "#";
824 const char line_separator_chars
[] = ";";
826 /* Chars that can be used to separate mant
827 from exp in floating point numbers. */
828 const char EXP_CHARS
[] = "eE";
830 /* Chars that mean this number is a floating point constant. */
834 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
836 /* Prefix characters that indicate the start of an immediate
838 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
840 /* Separator character handling. */
842 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
845 skip_past_char (char ** str
, char c
)
856 #define skip_past_comma(str) skip_past_char (str, ',')
858 /* Arithmetic expressions (possibly involving symbols). */
860 /* Return TRUE if anything in the expression is a bignum. */
863 walk_no_bignums (symbolS
* sp
)
865 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
868 if (symbol_get_value_expression (sp
)->X_add_symbol
)
870 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
871 || (symbol_get_value_expression (sp
)->X_op_symbol
872 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
878 static int in_my_get_expression
= 0;
880 /* Third argument to my_get_expression. */
881 #define GE_NO_PREFIX 0
882 #define GE_IMM_PREFIX 1
883 #define GE_OPT_PREFIX 2
884 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
885 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
886 #define GE_OPT_PREFIX_BIG 3
889 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
894 /* In unified syntax, all prefixes are optional. */
896 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
901 case GE_NO_PREFIX
: break;
903 if (!is_immediate_prefix (**str
))
905 inst
.error
= _("immediate expression requires a # prefix");
911 case GE_OPT_PREFIX_BIG
:
912 if (is_immediate_prefix (**str
))
918 memset (ep
, 0, sizeof (expressionS
));
920 save_in
= input_line_pointer
;
921 input_line_pointer
= *str
;
922 in_my_get_expression
= 1;
923 seg
= expression (ep
);
924 in_my_get_expression
= 0;
926 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
928 /* We found a bad or missing expression in md_operand(). */
929 *str
= input_line_pointer
;
930 input_line_pointer
= save_in
;
931 if (inst
.error
== NULL
)
932 inst
.error
= (ep
->X_op
== O_absent
933 ? _("missing expression") :_("bad expression"));
938 if (seg
!= absolute_section
939 && seg
!= text_section
940 && seg
!= data_section
941 && seg
!= bss_section
942 && seg
!= undefined_section
)
944 inst
.error
= _("bad segment");
945 *str
= input_line_pointer
;
946 input_line_pointer
= save_in
;
953 /* Get rid of any bignums now, so that we don't generate an error for which
954 we can't establish a line number later on. Big numbers are never valid
955 in instructions, which is where this routine is always called. */
956 if (prefix_mode
!= GE_OPT_PREFIX_BIG
957 && (ep
->X_op
== O_big
959 && (walk_no_bignums (ep
->X_add_symbol
)
961 && walk_no_bignums (ep
->X_op_symbol
))))))
963 inst
.error
= _("invalid constant");
964 *str
= input_line_pointer
;
965 input_line_pointer
= save_in
;
969 *str
= input_line_pointer
;
970 input_line_pointer
= save_in
;
974 /* Turn a string in input_line_pointer into a floating point constant
975 of type TYPE, and store the appropriate bytes in *LITP. The number
976 of LITTLENUMS emitted is stored in *SIZEP. An error message is
977 returned, or NULL on OK.
979 Note that fp constants aren't represent in the normal way on the ARM.
980 In big endian mode, things are as expected. However, in little endian
981 mode fp constants are big-endian word-wise, and little-endian byte-wise
982 within the words. For example, (double) 1.1 in big endian mode is
983 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
984 the byte sequence 99 99 f1 3f 9a 99 99 99.
986 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
989 md_atof (int type
, char * litP
, int * sizeP
)
992 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1024 return _("Unrecognized or unsupported floating point constant");
1027 t
= atof_ieee (input_line_pointer
, type
, words
);
1029 input_line_pointer
= t
;
1030 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1032 if (target_big_endian
)
1034 for (i
= 0; i
< prec
; i
++)
1036 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1037 litP
+= sizeof (LITTLENUM_TYPE
);
1042 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1043 for (i
= prec
- 1; i
>= 0; i
--)
1045 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1046 litP
+= sizeof (LITTLENUM_TYPE
);
1049 /* For a 4 byte float the order of elements in `words' is 1 0.
1050 For an 8 byte float the order is 1 0 3 2. */
1051 for (i
= 0; i
< prec
; i
+= 2)
1053 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1054 sizeof (LITTLENUM_TYPE
));
1055 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1056 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1057 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1064 /* We handle all bad expressions here, so that we can report the faulty
1065 instruction in the error message. */
1067 md_operand (expressionS
* exp
)
1069 if (in_my_get_expression
)
1070 exp
->X_op
= O_illegal
;
1073 /* Immediate values. */
1075 /* Generic immediate-value read function for use in directives.
1076 Accepts anything that 'expression' can fold to a constant.
1077 *val receives the number. */
1080 immediate_for_directive (int *val
)
1083 exp
.X_op
= O_illegal
;
1085 if (is_immediate_prefix (*input_line_pointer
))
1087 input_line_pointer
++;
1091 if (exp
.X_op
!= O_constant
)
1093 as_bad (_("expected #constant"));
1094 ignore_rest_of_line ();
1097 *val
= exp
.X_add_number
;
1102 /* Register parsing. */
1104 /* Generic register parser. CCP points to what should be the
1105 beginning of a register name. If it is indeed a valid register
1106 name, advance CCP over it and return the reg_entry structure;
1107 otherwise return NULL. Does not issue diagnostics. */
1109 static struct reg_entry
*
1110 arm_reg_parse_multi (char **ccp
)
1114 struct reg_entry
*reg
;
1116 #ifdef REGISTER_PREFIX
1117 if (*start
!= REGISTER_PREFIX
)
1121 #ifdef OPTIONAL_REGISTER_PREFIX
1122 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1127 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1132 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1134 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1144 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1145 enum arm_reg_type type
)
1147 /* Alternative syntaxes are accepted for a few register classes. */
1154 /* Generic coprocessor register names are allowed for these. */
1155 if (reg
&& reg
->type
== REG_TYPE_CN
)
1160 /* For backward compatibility, a bare number is valid here. */
1162 unsigned long processor
= strtoul (start
, ccp
, 10);
1163 if (*ccp
!= start
&& processor
<= 15)
1167 case REG_TYPE_MMXWC
:
1168 /* WC includes WCG. ??? I'm not sure this is true for all
1169 instructions that take WC registers. */
1170 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1181 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1182 return value is the register number or FAIL. */
1185 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1188 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1191 /* Do not allow a scalar (reg+index) to parse as a register. */
1192 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1195 if (reg
&& reg
->type
== type
)
1198 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1205 /* Parse a Neon type specifier. *STR should point at the leading '.'
1206 character. Does no verification at this stage that the type fits the opcode
1213 Can all be legally parsed by this function.
1215 Fills in neon_type struct pointer with parsed information, and updates STR
1216 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1217 type, FAIL if not. */
1220 parse_neon_type (struct neon_type
*type
, char **str
)
1227 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1229 enum neon_el_type thistype
= NT_untyped
;
1230 unsigned thissize
= -1u;
1237 /* Just a size without an explicit type. */
1241 switch (TOLOWER (*ptr
))
1243 case 'i': thistype
= NT_integer
; break;
1244 case 'f': thistype
= NT_float
; break;
1245 case 'p': thistype
= NT_poly
; break;
1246 case 's': thistype
= NT_signed
; break;
1247 case 'u': thistype
= NT_unsigned
; break;
1249 thistype
= NT_float
;
1254 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1260 /* .f is an abbreviation for .f32. */
1261 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1266 thissize
= strtoul (ptr
, &ptr
, 10);
1268 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1271 as_bad (_("bad size %d in type specifier"), thissize
);
1279 type
->el
[type
->elems
].type
= thistype
;
1280 type
->el
[type
->elems
].size
= thissize
;
1285 /* Empty/missing type is not a successful parse. */
1286 if (type
->elems
== 0)
1294 /* Errors may be set multiple times during parsing or bit encoding
1295 (particularly in the Neon bits), but usually the earliest error which is set
1296 will be the most meaningful. Avoid overwriting it with later (cascading)
1297 errors by calling this function. */
1300 first_error (const char *err
)
1306 /* Parse a single type, e.g. ".s32", leading period included. */
1308 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1311 struct neon_type optype
;
1315 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1317 if (optype
.elems
== 1)
1318 *vectype
= optype
.el
[0];
1321 first_error (_("only one type should be specified for operand"));
1327 first_error (_("vector type expected"));
1339 /* Special meanings for indices (which have a range of 0-7), which will fit into
1342 #define NEON_ALL_LANES 15
1343 #define NEON_INTERLEAVE_LANES 14
1345 /* Parse either a register or a scalar, with an optional type. Return the
1346 register number, and optionally fill in the actual type of the register
1347 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1348 type/index information in *TYPEINFO. */
1351 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1352 enum arm_reg_type
*rtype
,
1353 struct neon_typed_alias
*typeinfo
)
1356 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1357 struct neon_typed_alias atype
;
1358 struct neon_type_el parsetype
;
1362 atype
.eltype
.type
= NT_invtype
;
1363 atype
.eltype
.size
= -1;
1365 /* Try alternate syntax for some types of register. Note these are mutually
1366 exclusive with the Neon syntax extensions. */
1369 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1377 /* Undo polymorphism when a set of register types may be accepted. */
1378 if ((type
== REG_TYPE_NDQ
1379 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1380 || (type
== REG_TYPE_VFSD
1381 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1382 || (type
== REG_TYPE_NSDQ
1383 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1384 || reg
->type
== REG_TYPE_NQ
))
1385 || (type
== REG_TYPE_MMXWC
1386 && (reg
->type
== REG_TYPE_MMXWCG
)))
1387 type
= (enum arm_reg_type
) reg
->type
;
1389 if (type
!= reg
->type
)
1395 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1397 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1399 first_error (_("can't redefine type for operand"));
1402 atype
.defined
|= NTA_HASTYPE
;
1403 atype
.eltype
= parsetype
;
1406 if (skip_past_char (&str
, '[') == SUCCESS
)
1408 if (type
!= REG_TYPE_VFD
)
1410 first_error (_("only D registers may be indexed"));
1414 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1416 first_error (_("can't change index for operand"));
1420 atype
.defined
|= NTA_HASINDEX
;
1422 if (skip_past_char (&str
, ']') == SUCCESS
)
1423 atype
.index
= NEON_ALL_LANES
;
1428 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1430 if (exp
.X_op
!= O_constant
)
1432 first_error (_("constant expression required"));
1436 if (skip_past_char (&str
, ']') == FAIL
)
1439 atype
.index
= exp
.X_add_number
;
1454 /* Like arm_reg_parse, but allow allow the following extra features:
1455 - If RTYPE is non-zero, return the (possibly restricted) type of the
1456 register (e.g. Neon double or quad reg when either has been requested).
1457 - If this is a Neon vector type with additional type information, fill
1458 in the struct pointed to by VECTYPE (if non-NULL).
1459 This function will fault on encountering a scalar. */
1462 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1463 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1465 struct neon_typed_alias atype
;
1467 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1472 /* Do not allow a scalar (reg+index) to parse as a register. */
1473 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1475 first_error (_("register operand expected, but got scalar"));
1480 *vectype
= atype
.eltype
;
1487 #define NEON_SCALAR_REG(X) ((X) >> 4)
1488 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1490 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1491 have enough information to be able to do a good job bounds-checking. So, we
1492 just do easy checks here, and do further checks later. */
1495 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1499 struct neon_typed_alias atype
;
1501 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1503 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1506 if (atype
.index
== NEON_ALL_LANES
)
1508 first_error (_("scalar must have an index"));
1511 else if (atype
.index
>= 64 / elsize
)
1513 first_error (_("scalar index out of range"));
1518 *type
= atype
.eltype
;
1522 return reg
* 16 + atype
.index
;
1525 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1528 parse_reg_list (char ** strp
)
1530 char * str
= * strp
;
1534 /* We come back here if we get ranges concatenated by '+' or '|'. */
1549 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1551 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1561 first_error (_("bad range in register list"));
1565 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1567 if (range
& (1 << i
))
1569 (_("Warning: duplicated register (r%d) in register list"),
1577 if (range
& (1 << reg
))
1578 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1580 else if (reg
<= cur_reg
)
1581 as_tsktsk (_("Warning: register range not in ascending order"));
1586 while (skip_past_comma (&str
) != FAIL
1587 || (in_range
= 1, *str
++ == '-'));
1592 first_error (_("missing `}'"));
1600 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1603 if (exp
.X_op
== O_constant
)
1605 if (exp
.X_add_number
1606 != (exp
.X_add_number
& 0x0000ffff))
1608 inst
.error
= _("invalid register mask");
1612 if ((range
& exp
.X_add_number
) != 0)
1614 int regno
= range
& exp
.X_add_number
;
1617 regno
= (1 << regno
) - 1;
1619 (_("Warning: duplicated register (r%d) in register list"),
1623 range
|= exp
.X_add_number
;
1627 if (inst
.reloc
.type
!= 0)
1629 inst
.error
= _("expression too complex");
1633 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1634 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1635 inst
.reloc
.pc_rel
= 0;
1639 if (*str
== '|' || *str
== '+')
1645 while (another_range
);
1651 /* Types of registers in a list. */
1660 /* Parse a VFP register list. If the string is invalid return FAIL.
1661 Otherwise return the number of registers, and set PBASE to the first
1662 register. Parses registers of type ETYPE.
1663 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1664 - Q registers can be used to specify pairs of D registers
1665 - { } can be omitted from around a singleton register list
1666 FIXME: This is not implemented, as it would require backtracking in
1669 This could be done (the meaning isn't really ambiguous), but doesn't
1670 fit in well with the current parsing framework.
1671 - 32 D registers may be used (also true for VFPv3).
1672 FIXME: Types are ignored in these register lists, which is probably a
1676 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1681 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1685 unsigned long mask
= 0;
1690 inst
.error
= _("expecting {");
1699 regtype
= REG_TYPE_VFS
;
1704 regtype
= REG_TYPE_VFD
;
1707 case REGLIST_NEON_D
:
1708 regtype
= REG_TYPE_NDQ
;
1712 if (etype
!= REGLIST_VFP_S
)
1714 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1715 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1719 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1722 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1729 base_reg
= max_regs
;
1733 int setmask
= 1, addregs
= 1;
1735 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1737 if (new_base
== FAIL
)
1739 first_error (_(reg_expected_msgs
[regtype
]));
1743 if (new_base
>= max_regs
)
1745 first_error (_("register out of range in list"));
1749 /* Note: a value of 2 * n is returned for the register Q<n>. */
1750 if (regtype
== REG_TYPE_NQ
)
1756 if (new_base
< base_reg
)
1757 base_reg
= new_base
;
1759 if (mask
& (setmask
<< new_base
))
1761 first_error (_("invalid register list"));
1765 if ((mask
>> new_base
) != 0 && ! warned
)
1767 as_tsktsk (_("register list not in ascending order"));
1771 mask
|= setmask
<< new_base
;
1774 if (*str
== '-') /* We have the start of a range expression */
1780 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1783 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1787 if (high_range
>= max_regs
)
1789 first_error (_("register out of range in list"));
1793 if (regtype
== REG_TYPE_NQ
)
1794 high_range
= high_range
+ 1;
1796 if (high_range
<= new_base
)
1798 inst
.error
= _("register range not in ascending order");
1802 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1804 if (mask
& (setmask
<< new_base
))
1806 inst
.error
= _("invalid register list");
1810 mask
|= setmask
<< new_base
;
1815 while (skip_past_comma (&str
) != FAIL
);
1819 /* Sanity check -- should have raised a parse error above. */
1820 if (count
== 0 || count
> max_regs
)
1825 /* Final test -- the registers must be consecutive. */
1827 for (i
= 0; i
< count
; i
++)
1829 if ((mask
& (1u << i
)) == 0)
1831 inst
.error
= _("non-contiguous register range");
1841 /* True if two alias types are the same. */
1844 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1852 if (a
->defined
!= b
->defined
)
1855 if ((a
->defined
& NTA_HASTYPE
) != 0
1856 && (a
->eltype
.type
!= b
->eltype
.type
1857 || a
->eltype
.size
!= b
->eltype
.size
))
1860 if ((a
->defined
& NTA_HASINDEX
) != 0
1861 && (a
->index
!= b
->index
))
1867 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1868 The base register is put in *PBASE.
1869 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1871 The register stride (minus one) is put in bit 4 of the return value.
1872 Bits [6:5] encode the list length (minus one).
1873 The type of the list elements is put in *ELTYPE, if non-NULL. */
1875 #define NEON_LANE(X) ((X) & 0xf)
1876 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1877 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1880 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1881 struct neon_type_el
*eltype
)
1888 int leading_brace
= 0;
1889 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1890 const char *const incr_error
= _("register stride must be 1 or 2");
1891 const char *const type_error
= _("mismatched element/structure types in list");
1892 struct neon_typed_alias firsttype
;
1894 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1899 struct neon_typed_alias atype
;
1900 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1904 first_error (_(reg_expected_msgs
[rtype
]));
1911 if (rtype
== REG_TYPE_NQ
)
1917 else if (reg_incr
== -1)
1919 reg_incr
= getreg
- base_reg
;
1920 if (reg_incr
< 1 || reg_incr
> 2)
1922 first_error (_(incr_error
));
1926 else if (getreg
!= base_reg
+ reg_incr
* count
)
1928 first_error (_(incr_error
));
1932 if (! neon_alias_types_same (&atype
, &firsttype
))
1934 first_error (_(type_error
));
1938 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1942 struct neon_typed_alias htype
;
1943 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
1945 lane
= NEON_INTERLEAVE_LANES
;
1946 else if (lane
!= NEON_INTERLEAVE_LANES
)
1948 first_error (_(type_error
));
1953 else if (reg_incr
!= 1)
1955 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1959 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
1962 first_error (_(reg_expected_msgs
[rtype
]));
1965 if (! neon_alias_types_same (&htype
, &firsttype
))
1967 first_error (_(type_error
));
1970 count
+= hireg
+ dregs
- getreg
;
1974 /* If we're using Q registers, we can't use [] or [n] syntax. */
1975 if (rtype
== REG_TYPE_NQ
)
1981 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1985 else if (lane
!= atype
.index
)
1987 first_error (_(type_error
));
1991 else if (lane
== -1)
1992 lane
= NEON_INTERLEAVE_LANES
;
1993 else if (lane
!= NEON_INTERLEAVE_LANES
)
1995 first_error (_(type_error
));
2000 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2002 /* No lane set by [x]. We must be interleaving structures. */
2004 lane
= NEON_INTERLEAVE_LANES
;
2007 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2008 || (count
> 1 && reg_incr
== -1))
2010 first_error (_("error parsing element/structure list"));
2014 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2016 first_error (_("expected }"));
2024 *eltype
= firsttype
.eltype
;
2029 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2032 /* Parse an explicit relocation suffix on an expression. This is
2033 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2034 arm_reloc_hsh contains no entries, so this function can only
2035 succeed if there is no () after the word. Returns -1 on error,
2036 BFD_RELOC_UNUSED if there wasn't any suffix. */
2038 parse_reloc (char **str
)
2040 struct reloc_entry
*r
;
2044 return BFD_RELOC_UNUSED
;
2049 while (*q
&& *q
!= ')' && *q
!= ',')
2054 if ((r
= (struct reloc_entry
*)
2055 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2062 /* Directives: register aliases. */
2064 static struct reg_entry
*
2065 insert_reg_alias (char *str
, int number
, int type
)
2067 struct reg_entry
*new_reg
;
2070 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2072 if (new_reg
->builtin
)
2073 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2075 /* Only warn about a redefinition if it's not defined as the
2077 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2078 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2083 name
= xstrdup (str
);
2084 new_reg
= (struct reg_entry
*) xmalloc (sizeof (struct reg_entry
));
2086 new_reg
->name
= name
;
2087 new_reg
->number
= number
;
2088 new_reg
->type
= type
;
2089 new_reg
->builtin
= FALSE
;
2090 new_reg
->neon
= NULL
;
2092 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2099 insert_neon_reg_alias (char *str
, int number
, int type
,
2100 struct neon_typed_alias
*atype
)
2102 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2106 first_error (_("attempt to redefine typed alias"));
2112 reg
->neon
= (struct neon_typed_alias
*)
2113 xmalloc (sizeof (struct neon_typed_alias
));
2114 *reg
->neon
= *atype
;
2118 /* Look for the .req directive. This is of the form:
2120 new_register_name .req existing_register_name
2122 If we find one, or if it looks sufficiently like one that we want to
2123 handle any error here, return TRUE. Otherwise return FALSE. */
2126 create_register_alias (char * newname
, char *p
)
2128 struct reg_entry
*old
;
2129 char *oldname
, *nbuf
;
2132 /* The input scrubber ensures that whitespace after the mnemonic is
2133 collapsed to single spaces. */
2135 if (strncmp (oldname
, " .req ", 6) != 0)
2139 if (*oldname
== '\0')
2142 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2145 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2149 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2150 the desired alias name, and p points to its end. If not, then
2151 the desired alias name is in the global original_case_string. */
2152 #ifdef TC_CASE_SENSITIVE
2155 newname
= original_case_string
;
2156 nlen
= strlen (newname
);
2159 nbuf
= (char *) alloca (nlen
+ 1);
2160 memcpy (nbuf
, newname
, nlen
);
2163 /* Create aliases under the new name as stated; an all-lowercase
2164 version of the new name; and an all-uppercase version of the new
2166 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2168 for (p
= nbuf
; *p
; p
++)
2171 if (strncmp (nbuf
, newname
, nlen
))
2173 /* If this attempt to create an additional alias fails, do not bother
2174 trying to create the all-lower case alias. We will fail and issue
2175 a second, duplicate error message. This situation arises when the
2176 programmer does something like:
2179 The second .req creates the "Foo" alias but then fails to create
2180 the artificial FOO alias because it has already been created by the
2182 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2186 for (p
= nbuf
; *p
; p
++)
2189 if (strncmp (nbuf
, newname
, nlen
))
2190 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2196 /* Create a Neon typed/indexed register alias using directives, e.g.:
2201 These typed registers can be used instead of the types specified after the
2202 Neon mnemonic, so long as all operands given have types. Types can also be
2203 specified directly, e.g.:
2204 vadd d0.s32, d1.s32, d2.s32 */
2207 create_neon_reg_alias (char *newname
, char *p
)
2209 enum arm_reg_type basetype
;
2210 struct reg_entry
*basereg
;
2211 struct reg_entry mybasereg
;
2212 struct neon_type ntype
;
2213 struct neon_typed_alias typeinfo
;
2214 char *namebuf
, *nameend
;
2217 typeinfo
.defined
= 0;
2218 typeinfo
.eltype
.type
= NT_invtype
;
2219 typeinfo
.eltype
.size
= -1;
2220 typeinfo
.index
= -1;
2224 if (strncmp (p
, " .dn ", 5) == 0)
2225 basetype
= REG_TYPE_VFD
;
2226 else if (strncmp (p
, " .qn ", 5) == 0)
2227 basetype
= REG_TYPE_NQ
;
2236 basereg
= arm_reg_parse_multi (&p
);
2238 if (basereg
&& basereg
->type
!= basetype
)
2240 as_bad (_("bad type for register"));
2244 if (basereg
== NULL
)
2247 /* Try parsing as an integer. */
2248 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2249 if (exp
.X_op
!= O_constant
)
2251 as_bad (_("expression must be constant"));
2254 basereg
= &mybasereg
;
2255 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2261 typeinfo
= *basereg
->neon
;
2263 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2265 /* We got a type. */
2266 if (typeinfo
.defined
& NTA_HASTYPE
)
2268 as_bad (_("can't redefine the type of a register alias"));
2272 typeinfo
.defined
|= NTA_HASTYPE
;
2273 if (ntype
.elems
!= 1)
2275 as_bad (_("you must specify a single type only"));
2278 typeinfo
.eltype
= ntype
.el
[0];
2281 if (skip_past_char (&p
, '[') == SUCCESS
)
2284 /* We got a scalar index. */
2286 if (typeinfo
.defined
& NTA_HASINDEX
)
2288 as_bad (_("can't redefine the index of a scalar alias"));
2292 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2294 if (exp
.X_op
!= O_constant
)
2296 as_bad (_("scalar index must be constant"));
2300 typeinfo
.defined
|= NTA_HASINDEX
;
2301 typeinfo
.index
= exp
.X_add_number
;
2303 if (skip_past_char (&p
, ']') == FAIL
)
2305 as_bad (_("expecting ]"));
2310 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2311 the desired alias name, and p points to its end. If not, then
2312 the desired alias name is in the global original_case_string. */
2313 #ifdef TC_CASE_SENSITIVE
2314 namelen
= nameend
- newname
;
2316 newname
= original_case_string
;
2317 namelen
= strlen (newname
);
2320 namebuf
= (char *) alloca (namelen
+ 1);
2321 strncpy (namebuf
, newname
, namelen
);
2322 namebuf
[namelen
] = '\0';
2324 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2325 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2327 /* Insert name in all uppercase. */
2328 for (p
= namebuf
; *p
; p
++)
2331 if (strncmp (namebuf
, newname
, namelen
))
2332 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2333 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2335 /* Insert name in all lowercase. */
2336 for (p
= namebuf
; *p
; p
++)
2339 if (strncmp (namebuf
, newname
, namelen
))
2340 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2341 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2346 /* Should never be called, as .req goes between the alias and the
2347 register name, not at the beginning of the line. */
2350 s_req (int a ATTRIBUTE_UNUSED
)
2352 as_bad (_("invalid syntax for .req directive"));
2356 s_dn (int a ATTRIBUTE_UNUSED
)
2358 as_bad (_("invalid syntax for .dn directive"));
2362 s_qn (int a ATTRIBUTE_UNUSED
)
2364 as_bad (_("invalid syntax for .qn directive"));
2367 /* The .unreq directive deletes an alias which was previously defined
2368 by .req. For example:
2374 s_unreq (int a ATTRIBUTE_UNUSED
)
2379 name
= input_line_pointer
;
2381 while (*input_line_pointer
!= 0
2382 && *input_line_pointer
!= ' '
2383 && *input_line_pointer
!= '\n')
2384 ++input_line_pointer
;
2386 saved_char
= *input_line_pointer
;
2387 *input_line_pointer
= 0;
2390 as_bad (_("invalid syntax for .unreq directive"));
2393 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2397 as_bad (_("unknown register alias '%s'"), name
);
2398 else if (reg
->builtin
)
2399 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2406 hash_delete (arm_reg_hsh
, name
, FALSE
);
2407 free ((char *) reg
->name
);
2412 /* Also locate the all upper case and all lower case versions.
2413 Do not complain if we cannot find one or the other as it
2414 was probably deleted above. */
2416 nbuf
= strdup (name
);
2417 for (p
= nbuf
; *p
; p
++)
2419 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2422 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2423 free ((char *) reg
->name
);
2429 for (p
= nbuf
; *p
; p
++)
2431 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2434 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2435 free ((char *) reg
->name
);
2445 *input_line_pointer
= saved_char
;
2446 demand_empty_rest_of_line ();
2449 /* Directives: Instruction set selection. */
2452 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2453 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2454 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2455 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2457 /* Create a new mapping symbol for the transition to STATE. */
2460 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2463 const char * symname
;
2470 type
= BSF_NO_FLAGS
;
2474 type
= BSF_NO_FLAGS
;
2478 type
= BSF_NO_FLAGS
;
2484 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2485 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2490 THUMB_SET_FUNC (symbolP
, 0);
2491 ARM_SET_THUMB (symbolP
, 0);
2492 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2496 THUMB_SET_FUNC (symbolP
, 1);
2497 ARM_SET_THUMB (symbolP
, 1);
2498 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2506 /* Save the mapping symbols for future reference. Also check that
2507 we do not place two mapping symbols at the same offset within a
2508 frag. We'll handle overlap between frags in
2509 check_mapping_symbols.
2511 If .fill or other data filling directive generates zero sized data,
2512 the mapping symbol for the following code will have the same value
2513 as the one generated for the data filling directive. In this case,
2514 we replace the old symbol with the new one at the same address. */
2517 if (frag
->tc_frag_data
.first_map
!= NULL
)
2519 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2520 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2522 frag
->tc_frag_data
.first_map
= symbolP
;
2524 if (frag
->tc_frag_data
.last_map
!= NULL
)
2526 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2527 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2528 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2530 frag
->tc_frag_data
.last_map
= symbolP
;
2533 /* We must sometimes convert a region marked as code to data during
2534 code alignment, if an odd number of bytes have to be padded. The
2535 code mapping symbol is pushed to an aligned address. */
2538 insert_data_mapping_symbol (enum mstate state
,
2539 valueT value
, fragS
*frag
, offsetT bytes
)
2541 /* If there was already a mapping symbol, remove it. */
2542 if (frag
->tc_frag_data
.last_map
!= NULL
2543 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2545 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2549 know (frag
->tc_frag_data
.first_map
== symp
);
2550 frag
->tc_frag_data
.first_map
= NULL
;
2552 frag
->tc_frag_data
.last_map
= NULL
;
2553 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2556 make_mapping_symbol (MAP_DATA
, value
, frag
);
2557 make_mapping_symbol (state
, value
+ bytes
, frag
);
2560 static void mapping_state_2 (enum mstate state
, int max_chars
);
2562 /* Set the mapping state to STATE. Only call this when about to
2563 emit some STATE bytes to the file. */
2566 mapping_state (enum mstate state
)
2568 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2570 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2572 if (mapstate
== state
)
2573 /* The mapping symbol has already been emitted.
2574 There is nothing else to do. */
2576 else if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2577 /* This case will be evaluated later in the next else. */
2579 else if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2580 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2582 /* Only add the symbol if the offset is > 0:
2583 if we're at the first frag, check it's size > 0;
2584 if we're not at the first frag, then for sure
2585 the offset is > 0. */
2586 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2587 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2590 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2593 mapping_state_2 (state
, 0);
2597 /* Same as mapping_state, but MAX_CHARS bytes have already been
2598 allocated. Put the mapping symbol that far back. */
2601 mapping_state_2 (enum mstate state
, int max_chars
)
2603 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2605 if (!SEG_NORMAL (now_seg
))
2608 if (mapstate
== state
)
2609 /* The mapping symbol has already been emitted.
2610 There is nothing else to do. */
2613 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2614 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2617 #define mapping_state(x) ((void)0)
2618 #define mapping_state_2(x, y) ((void)0)
2621 /* Find the real, Thumb encoded start of a Thumb function. */
2625 find_real_start (symbolS
* symbolP
)
2628 const char * name
= S_GET_NAME (symbolP
);
2629 symbolS
* new_target
;
2631 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2632 #define STUB_NAME ".real_start_of"
2637 /* The compiler may generate BL instructions to local labels because
2638 it needs to perform a branch to a far away location. These labels
2639 do not have a corresponding ".real_start_of" label. We check
2640 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2641 the ".real_start_of" convention for nonlocal branches. */
2642 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2645 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2646 new_target
= symbol_find (real_start
);
2648 if (new_target
== NULL
)
2650 as_warn (_("Failed to find real start of function: %s\n"), name
);
2651 new_target
= symbolP
;
2659 opcode_select (int width
)
2666 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2667 as_bad (_("selected processor does not support THUMB opcodes"));
2670 /* No need to force the alignment, since we will have been
2671 coming from ARM mode, which is word-aligned. */
2672 record_alignment (now_seg
, 1);
2679 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2680 as_bad (_("selected processor does not support ARM opcodes"));
2685 frag_align (2, 0, 0);
2687 record_alignment (now_seg
, 1);
2692 as_bad (_("invalid instruction size selected (%d)"), width
);
2697 s_arm (int ignore ATTRIBUTE_UNUSED
)
2700 demand_empty_rest_of_line ();
2704 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2707 demand_empty_rest_of_line ();
2711 s_code (int unused ATTRIBUTE_UNUSED
)
2715 temp
= get_absolute_expression ();
2720 opcode_select (temp
);
2724 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2729 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2731 /* If we are not already in thumb mode go into it, EVEN if
2732 the target processor does not support thumb instructions.
2733 This is used by gcc/config/arm/lib1funcs.asm for example
2734 to compile interworking support functions even if the
2735 target processor should not support interworking. */
2739 record_alignment (now_seg
, 1);
2742 demand_empty_rest_of_line ();
2746 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2750 /* The following label is the name/address of the start of a Thumb function.
2751 We need to know this for the interworking support. */
2752 label_is_thumb_function_name
= TRUE
;
2755 /* Perform a .set directive, but also mark the alias as
2756 being a thumb function. */
2759 s_thumb_set (int equiv
)
2761 /* XXX the following is a duplicate of the code for s_set() in read.c
2762 We cannot just call that code as we need to get at the symbol that
2769 /* Especial apologies for the random logic:
2770 This just grew, and could be parsed much more simply!
2772 name
= input_line_pointer
;
2773 delim
= get_symbol_end ();
2774 end_name
= input_line_pointer
;
2777 if (*input_line_pointer
!= ',')
2780 as_bad (_("expected comma after name \"%s\""), name
);
2782 ignore_rest_of_line ();
2786 input_line_pointer
++;
2789 if (name
[0] == '.' && name
[1] == '\0')
2791 /* XXX - this should not happen to .thumb_set. */
2795 if ((symbolP
= symbol_find (name
)) == NULL
2796 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2799 /* When doing symbol listings, play games with dummy fragments living
2800 outside the normal fragment chain to record the file and line info
2802 if (listing
& LISTING_SYMBOLS
)
2804 extern struct list_info_struct
* listing_tail
;
2805 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2807 memset (dummy_frag
, 0, sizeof (fragS
));
2808 dummy_frag
->fr_type
= rs_fill
;
2809 dummy_frag
->line
= listing_tail
;
2810 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2811 dummy_frag
->fr_symbol
= symbolP
;
2815 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2818 /* "set" symbols are local unless otherwise specified. */
2819 SF_SET_LOCAL (symbolP
);
2820 #endif /* OBJ_COFF */
2821 } /* Make a new symbol. */
2823 symbol_table_insert (symbolP
);
2828 && S_IS_DEFINED (symbolP
)
2829 && S_GET_SEGMENT (symbolP
) != reg_section
)
2830 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2832 pseudo_set (symbolP
);
2834 demand_empty_rest_of_line ();
2836 /* XXX Now we come to the Thumb specific bit of code. */
2838 THUMB_SET_FUNC (symbolP
, 1);
2839 ARM_SET_THUMB (symbolP
, 1);
2840 #if defined OBJ_ELF || defined OBJ_COFF
2841 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2845 /* Directives: Mode selection. */
2847 /* .syntax [unified|divided] - choose the new unified syntax
2848 (same for Arm and Thumb encoding, modulo slight differences in what
2849 can be represented) or the old divergent syntax for each mode. */
2851 s_syntax (int unused ATTRIBUTE_UNUSED
)
2855 name
= input_line_pointer
;
2856 delim
= get_symbol_end ();
2858 if (!strcasecmp (name
, "unified"))
2859 unified_syntax
= TRUE
;
2860 else if (!strcasecmp (name
, "divided"))
2861 unified_syntax
= FALSE
;
2864 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2867 *input_line_pointer
= delim
;
2868 demand_empty_rest_of_line ();
2871 /* Directives: sectioning and alignment. */
2873 /* Same as s_align_ptwo but align 0 => align 2. */
2876 s_align (int unused ATTRIBUTE_UNUSED
)
2881 long max_alignment
= 15;
2883 temp
= get_absolute_expression ();
2884 if (temp
> max_alignment
)
2885 as_bad (_("alignment too large: %d assumed"), temp
= max_alignment
);
2888 as_bad (_("alignment negative. 0 assumed."));
2892 if (*input_line_pointer
== ',')
2894 input_line_pointer
++;
2895 temp_fill
= get_absolute_expression ();
2907 /* Only make a frag if we HAVE to. */
2908 if (temp
&& !need_pass_2
)
2910 if (!fill_p
&& subseg_text_p (now_seg
))
2911 frag_align_code (temp
, 0);
2913 frag_align (temp
, (int) temp_fill
, 0);
2915 demand_empty_rest_of_line ();
2917 record_alignment (now_seg
, temp
);
2921 s_bss (int ignore ATTRIBUTE_UNUSED
)
2923 /* We don't support putting frags in the BSS segment, we fake it by
2924 marking in_bss, then looking at s_skip for clues. */
2925 subseg_set (bss_section
, 0);
2926 demand_empty_rest_of_line ();
2928 #ifdef md_elf_section_change_hook
2929 md_elf_section_change_hook ();
2934 s_even (int ignore ATTRIBUTE_UNUSED
)
2936 /* Never make frag if expect extra pass. */
2938 frag_align (1, 0, 0);
2940 record_alignment (now_seg
, 1);
2942 demand_empty_rest_of_line ();
2945 /* Directives: Literal pools. */
2947 static literal_pool
*
2948 find_literal_pool (void)
2950 literal_pool
* pool
;
2952 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
2954 if (pool
->section
== now_seg
2955 && pool
->sub_section
== now_subseg
)
2962 static literal_pool
*
2963 find_or_make_literal_pool (void)
2965 /* Next literal pool ID number. */
2966 static unsigned int latest_pool_num
= 1;
2967 literal_pool
* pool
;
2969 pool
= find_literal_pool ();
2973 /* Create a new pool. */
2974 pool
= (literal_pool
*) xmalloc (sizeof (* pool
));
2978 pool
->next_free_entry
= 0;
2979 pool
->section
= now_seg
;
2980 pool
->sub_section
= now_subseg
;
2981 pool
->next
= list_of_pools
;
2982 pool
->symbol
= NULL
;
2984 /* Add it to the list. */
2985 list_of_pools
= pool
;
2988 /* New pools, and emptied pools, will have a NULL symbol. */
2989 if (pool
->symbol
== NULL
)
2991 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
2992 (valueT
) 0, &zero_address_frag
);
2993 pool
->id
= latest_pool_num
++;
3000 /* Add the literal in the global 'inst'
3001 structure to the relevant literal pool. */
3004 add_to_lit_pool (void)
3006 literal_pool
* pool
;
3009 pool
= find_or_make_literal_pool ();
3011 /* Check if this literal value is already in the pool. */
3012 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3014 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3015 && (inst
.reloc
.exp
.X_op
== O_constant
)
3016 && (pool
->literals
[entry
].X_add_number
3017 == inst
.reloc
.exp
.X_add_number
)
3018 && (pool
->literals
[entry
].X_unsigned
3019 == inst
.reloc
.exp
.X_unsigned
))
3022 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3023 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3024 && (pool
->literals
[entry
].X_add_number
3025 == inst
.reloc
.exp
.X_add_number
)
3026 && (pool
->literals
[entry
].X_add_symbol
3027 == inst
.reloc
.exp
.X_add_symbol
)
3028 && (pool
->literals
[entry
].X_op_symbol
3029 == inst
.reloc
.exp
.X_op_symbol
))
3033 /* Do we need to create a new entry? */
3034 if (entry
== pool
->next_free_entry
)
3036 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3038 inst
.error
= _("literal pool overflow");
3042 pool
->literals
[entry
] = inst
.reloc
.exp
;
3043 pool
->next_free_entry
+= 1;
3046 inst
.reloc
.exp
.X_op
= O_symbol
;
3047 inst
.reloc
.exp
.X_add_number
= ((int) entry
) * 4;
3048 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3053 /* Can't use symbol_new here, so have to create a symbol and then at
3054 a later date assign it a value. Thats what these functions do. */
3057 symbol_locate (symbolS
* symbolP
,
3058 const char * name
, /* It is copied, the caller can modify. */
3059 segT segment
, /* Segment identifier (SEG_<something>). */
3060 valueT valu
, /* Symbol value. */
3061 fragS
* frag
) /* Associated fragment. */
3063 unsigned int name_length
;
3064 char * preserved_copy_of_name
;
3066 name_length
= strlen (name
) + 1; /* +1 for \0. */
3067 obstack_grow (¬es
, name
, name_length
);
3068 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3070 #ifdef tc_canonicalize_symbol_name
3071 preserved_copy_of_name
=
3072 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3075 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3077 S_SET_SEGMENT (symbolP
, segment
);
3078 S_SET_VALUE (symbolP
, valu
);
3079 symbol_clear_list_pointers (symbolP
);
3081 symbol_set_frag (symbolP
, frag
);
3083 /* Link to end of symbol chain. */
3085 extern int symbol_table_frozen
;
3087 if (symbol_table_frozen
)
3091 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3093 obj_symbol_new_hook (symbolP
);
3095 #ifdef tc_symbol_new_hook
3096 tc_symbol_new_hook (symbolP
);
3100 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3101 #endif /* DEBUG_SYMS */
3106 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3109 literal_pool
* pool
;
3112 pool
= find_literal_pool ();
3114 || pool
->symbol
== NULL
3115 || pool
->next_free_entry
== 0)
3118 mapping_state (MAP_DATA
);
3120 /* Align pool as you have word accesses.
3121 Only make a frag if we have to. */
3123 frag_align (2, 0, 0);
3125 record_alignment (now_seg
, 2);
3127 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3129 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3130 (valueT
) frag_now_fix (), frag_now
);
3131 symbol_table_insert (pool
->symbol
);
3133 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3135 #if defined OBJ_COFF || defined OBJ_ELF
3136 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3139 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3140 /* First output the expression in the instruction to the pool. */
3141 emit_expr (&(pool
->literals
[entry
]), 4); /* .word */
3143 /* Mark the pool as empty. */
3144 pool
->next_free_entry
= 0;
3145 pool
->symbol
= NULL
;
3149 /* Forward declarations for functions below, in the MD interface
3151 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3152 static valueT
create_unwind_entry (int);
3153 static void start_unwind_section (const segT
, int);
3154 static void add_unwind_opcode (valueT
, int);
3155 static void flush_pending_unwind (void);
3157 /* Directives: Data. */
3160 s_arm_elf_cons (int nbytes
)
3164 #ifdef md_flush_pending_output
3165 md_flush_pending_output ();
3168 if (is_it_end_of_statement ())
3170 demand_empty_rest_of_line ();
3174 #ifdef md_cons_align
3175 md_cons_align (nbytes
);
3178 mapping_state (MAP_DATA
);
3182 char *base
= input_line_pointer
;
3186 if (exp
.X_op
!= O_symbol
)
3187 emit_expr (&exp
, (unsigned int) nbytes
);
3190 char *before_reloc
= input_line_pointer
;
3191 reloc
= parse_reloc (&input_line_pointer
);
3194 as_bad (_("unrecognized relocation suffix"));
3195 ignore_rest_of_line ();
3198 else if (reloc
== BFD_RELOC_UNUSED
)
3199 emit_expr (&exp
, (unsigned int) nbytes
);
3202 reloc_howto_type
*howto
= (reloc_howto_type
*)
3203 bfd_reloc_type_lookup (stdoutput
,
3204 (bfd_reloc_code_real_type
) reloc
);
3205 int size
= bfd_get_reloc_size (howto
);
3207 if (reloc
== BFD_RELOC_ARM_PLT32
)
3209 as_bad (_("(plt) is only valid on branch targets"));
3210 reloc
= BFD_RELOC_UNUSED
;
3215 as_bad (_("%s relocations do not fit in %d bytes"),
3216 howto
->name
, nbytes
);
3219 /* We've parsed an expression stopping at O_symbol.
3220 But there may be more expression left now that we
3221 have parsed the relocation marker. Parse it again.
3222 XXX Surely there is a cleaner way to do this. */
3223 char *p
= input_line_pointer
;
3225 char *save_buf
= (char *) alloca (input_line_pointer
- base
);
3226 memcpy (save_buf
, base
, input_line_pointer
- base
);
3227 memmove (base
+ (input_line_pointer
- before_reloc
),
3228 base
, before_reloc
- base
);
3230 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3232 memcpy (base
, save_buf
, p
- base
);
3234 offset
= nbytes
- size
;
3235 p
= frag_more ((int) nbytes
);
3236 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3237 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3242 while (*input_line_pointer
++ == ',');
3244 /* Put terminator back into stream. */
3245 input_line_pointer
--;
3246 demand_empty_rest_of_line ();
3249 /* Emit an expression containing a 32-bit thumb instruction.
3250 Implementation based on put_thumb32_insn. */
3253 emit_thumb32_expr (expressionS
* exp
)
3255 expressionS exp_high
= *exp
;
3257 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3258 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3259 exp
->X_add_number
&= 0xffff;
3260 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3263 /* Guess the instruction size based on the opcode. */
/* Guess the instruction size based on the opcode: 2 bytes for a
   16-bit encoding, 4 for a 32-bit one, 0 if it cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3277 emit_insn (expressionS
*exp
, int nbytes
)
3281 if (exp
->X_op
== O_constant
)
3286 size
= thumb_insn_size (exp
->X_add_number
);
3290 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3292 as_bad (_(".inst.n operand too big. "\
3293 "Use .inst.w instead"));
3298 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3299 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3301 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3303 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3304 emit_thumb32_expr (exp
);
3306 emit_expr (exp
, (unsigned int) size
);
3308 it_fsm_post_encode ();
3312 as_bad (_("cannot determine Thumb instruction size. " \
3313 "Use .inst.n/.inst.w instead"));
3316 as_bad (_("constant expression required"));
3321 /* Like s_arm_elf_cons but do not use md_cons_align and
3322 set the mapping state to MAP_ARM/MAP_THUMB. */
3325 s_arm_elf_inst (int nbytes
)
3327 if (is_it_end_of_statement ())
3329 demand_empty_rest_of_line ();
3333 /* Calling mapping_state () here will not change ARM/THUMB,
3334 but will ensure not to be in DATA state. */
3337 mapping_state (MAP_THUMB
);
3342 as_bad (_("width suffixes are invalid in ARM mode"));
3343 ignore_rest_of_line ();
3349 mapping_state (MAP_ARM
);
3358 if (! emit_insn (& exp
, nbytes
))
3360 ignore_rest_of_line ();
3364 while (*input_line_pointer
++ == ',');
3366 /* Put terminator back into stream. */
3367 input_line_pointer
--;
3368 demand_empty_rest_of_line ();
3371 /* Parse a .rel31 directive. */
3374 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3381 if (*input_line_pointer
== '1')
3382 highbit
= 0x80000000;
3383 else if (*input_line_pointer
!= '0')
3384 as_bad (_("expected 0 or 1"));
3386 input_line_pointer
++;
3387 if (*input_line_pointer
!= ',')
3388 as_bad (_("missing comma"));
3389 input_line_pointer
++;
3391 #ifdef md_flush_pending_output
3392 md_flush_pending_output ();
3395 #ifdef md_cons_align
3399 mapping_state (MAP_DATA
);
3404 md_number_to_chars (p
, highbit
, 4);
3405 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3406 BFD_RELOC_ARM_PREL31
);
3408 demand_empty_rest_of_line ();
3411 /* Directives: AEABI stack-unwind tables. */
3413 /* Parse an unwind_fnstart directive. Simply records the current location. */
3416 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3418 demand_empty_rest_of_line ();
3419 if (unwind
.proc_start
)
3421 as_bad (_("duplicate .fnstart directive"));
3425 /* Mark the start of the function. */
3426 unwind
.proc_start
= expr_build_dot ();
3428 /* Reset the rest of the unwind info. */
3429 unwind
.opcode_count
= 0;
3430 unwind
.table_entry
= NULL
;
3431 unwind
.personality_routine
= NULL
;
3432 unwind
.personality_index
= -1;
3433 unwind
.frame_size
= 0;
3434 unwind
.fp_offset
= 0;
3435 unwind
.fp_reg
= REG_SP
;
3437 unwind
.sp_restored
= 0;
3441 /* Parse a handlerdata directive. Creates the exception handling table entry
3442 for the function. */
3445 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3447 demand_empty_rest_of_line ();
3448 if (!unwind
.proc_start
)
3449 as_bad (MISSING_FNSTART
);
3451 if (unwind
.table_entry
)
3452 as_bad (_("duplicate .handlerdata directive"));
3454 create_unwind_entry (1);
3457 /* Parse an unwind_fnend directive. Generates the index table entry. */
3460 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3465 unsigned int marked_pr_dependency
;
3467 demand_empty_rest_of_line ();
3469 if (!unwind
.proc_start
)
3471 as_bad (_(".fnend directive without .fnstart"));
3475 /* Add eh table entry. */
3476 if (unwind
.table_entry
== NULL
)
3477 val
= create_unwind_entry (0);
3481 /* Add index table entry. This is two words. */
3482 start_unwind_section (unwind
.saved_seg
, 1);
3483 frag_align (2, 0, 0);
3484 record_alignment (now_seg
, 2);
3486 ptr
= frag_more (8);
3487 where
= frag_now_fix () - 8;
3489 /* Self relative offset of the function start. */
3490 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3491 BFD_RELOC_ARM_PREL31
);
3493 /* Indicate dependency on EHABI-defined personality routines to the
3494 linker, if it hasn't been done already. */
3495 marked_pr_dependency
3496 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3497 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3498 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3500 static const char *const name
[] =
3502 "__aeabi_unwind_cpp_pr0",
3503 "__aeabi_unwind_cpp_pr1",
3504 "__aeabi_unwind_cpp_pr2"
3506 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3507 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3508 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3509 |= 1 << unwind
.personality_index
;
3513 /* Inline exception table entry. */
3514 md_number_to_chars (ptr
+ 4, val
, 4);
3516 /* Self relative offset of the table entry. */
3517 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3518 BFD_RELOC_ARM_PREL31
);
3520 /* Restore the original section. */
3521 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3523 unwind
.proc_start
= NULL
;
3527 /* Parse an unwind_cantunwind directive. */
3530 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3532 demand_empty_rest_of_line ();
3533 if (!unwind
.proc_start
)
3534 as_bad (MISSING_FNSTART
);
3536 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3537 as_bad (_("personality routine specified for cantunwind frame"));
3539 unwind
.personality_index
= -2;
3543 /* Parse a personalityindex directive. */
3546 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3550 if (!unwind
.proc_start
)
3551 as_bad (MISSING_FNSTART
);
3553 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3554 as_bad (_("duplicate .personalityindex directive"));
3558 if (exp
.X_op
!= O_constant
3559 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3561 as_bad (_("bad personality routine number"));
3562 ignore_rest_of_line ();
3566 unwind
.personality_index
= exp
.X_add_number
;
3568 demand_empty_rest_of_line ();
3572 /* Parse a personality directive. */
3575 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3579 if (!unwind
.proc_start
)
3580 as_bad (MISSING_FNSTART
);
3582 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3583 as_bad (_("duplicate .personality directive"));
3585 name
= input_line_pointer
;
3586 c
= get_symbol_end ();
3587 p
= input_line_pointer
;
3588 unwind
.personality_routine
= symbol_find_or_make (name
);
3590 demand_empty_rest_of_line ();
3594 /* Parse a directive saving core registers. */
3597 s_arm_unwind_save_core (void)
3603 range
= parse_reg_list (&input_line_pointer
);
3606 as_bad (_("expected register list"));
3607 ignore_rest_of_line ();
3611 demand_empty_rest_of_line ();
3613 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3614 into .unwind_save {..., sp...}. We aren't bothered about the value of
3615 ip because it is clobbered by calls. */
3616 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3617 && (range
& 0x3000) == 0x1000)
3619 unwind
.opcode_count
--;
3620 unwind
.sp_restored
= 0;
3621 range
= (range
| 0x2000) & ~0x1000;
3622 unwind
.pending_offset
= 0;
3628 /* See if we can use the short opcodes. These pop a block of up to 8
3629 registers starting with r4, plus maybe r14. */
3630 for (n
= 0; n
< 8; n
++)
3632 /* Break at the first non-saved register. */
3633 if ((range
& (1 << (n
+ 4))) == 0)
3636 /* See if there are any other bits set. */
3637 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3639 /* Use the long form. */
3640 op
= 0x8000 | ((range
>> 4) & 0xfff);
3641 add_unwind_opcode (op
, 2);
3645 /* Use the short form. */
3647 op
= 0xa8; /* Pop r14. */
3649 op
= 0xa0; /* Do not pop r14. */
3651 add_unwind_opcode (op
, 1);
3658 op
= 0xb100 | (range
& 0xf);
3659 add_unwind_opcode (op
, 2);
3662 /* Record the number of bytes pushed. */
3663 for (n
= 0; n
< 16; n
++)
3665 if (range
& (1 << n
))
3666 unwind
.frame_size
+= 4;
3671 /* Parse a directive saving FPA registers. */
3674 s_arm_unwind_save_fpa (int reg
)
3680 /* Get Number of registers to transfer. */
3681 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3684 exp
.X_op
= O_illegal
;
3686 if (exp
.X_op
!= O_constant
)
3688 as_bad (_("expected , <constant>"));
3689 ignore_rest_of_line ();
3693 num_regs
= exp
.X_add_number
;
3695 if (num_regs
< 1 || num_regs
> 4)
3697 as_bad (_("number of registers must be in the range [1:4]"));
3698 ignore_rest_of_line ();
3702 demand_empty_rest_of_line ();
3707 op
= 0xb4 | (num_regs
- 1);
3708 add_unwind_opcode (op
, 1);
3713 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3714 add_unwind_opcode (op
, 2);
3716 unwind
.frame_size
+= num_regs
* 12;
3720 /* Parse a directive saving VFP registers for ARMv6 and above. */
3723 s_arm_unwind_save_vfp_armv6 (void)
3728 int num_vfpv3_regs
= 0;
3729 int num_regs_below_16
;
3731 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
3734 as_bad (_("expected register list"));
3735 ignore_rest_of_line ();
3739 demand_empty_rest_of_line ();
3741 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3742 than FSTMX/FLDMX-style ones). */
3744 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3746 num_vfpv3_regs
= count
;
3747 else if (start
+ count
> 16)
3748 num_vfpv3_regs
= start
+ count
- 16;
3750 if (num_vfpv3_regs
> 0)
3752 int start_offset
= start
> 16 ? start
- 16 : 0;
3753 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
3754 add_unwind_opcode (op
, 2);
3757 /* Generate opcode for registers numbered in the range 0 .. 15. */
3758 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
3759 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
3760 if (num_regs_below_16
> 0)
3762 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
3763 add_unwind_opcode (op
, 2);
3766 unwind
.frame_size
+= count
* 8;
3770 /* Parse a directive saving VFP registers for pre-ARMv6. */
3773 s_arm_unwind_save_vfp (void)
3779 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
3782 as_bad (_("expected register list"));
3783 ignore_rest_of_line ();
3787 demand_empty_rest_of_line ();
3792 op
= 0xb8 | (count
- 1);
3793 add_unwind_opcode (op
, 1);
3798 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3799 add_unwind_opcode (op
, 2);
3801 unwind
.frame_size
+= count
* 8 + 4;
3805 /* Parse a directive saving iWMMXt data registers. */
3808 s_arm_unwind_save_mmxwr (void)
3816 if (*input_line_pointer
== '{')
3817 input_line_pointer
++;
3821 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3825 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3830 as_tsktsk (_("register list not in ascending order"));
3833 if (*input_line_pointer
== '-')
3835 input_line_pointer
++;
3836 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3839 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3842 else if (reg
>= hi_reg
)
3844 as_bad (_("bad register range"));
3847 for (; reg
< hi_reg
; reg
++)
3851 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3853 if (*input_line_pointer
== '}')
3854 input_line_pointer
++;
3856 demand_empty_rest_of_line ();
3858 /* Generate any deferred opcodes because we're going to be looking at
3860 flush_pending_unwind ();
3862 for (i
= 0; i
< 16; i
++)
3864 if (mask
& (1 << i
))
3865 unwind
.frame_size
+= 8;
3868 /* Attempt to combine with a previous opcode. We do this because gcc
3869 likes to output separate unwind directives for a single block of
3871 if (unwind
.opcode_count
> 0)
3873 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3874 if ((i
& 0xf8) == 0xc0)
3877 /* Only merge if the blocks are contiguous. */
3880 if ((mask
& 0xfe00) == (1 << 9))
3882 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3883 unwind
.opcode_count
--;
3886 else if (i
== 6 && unwind
.opcode_count
>= 2)
3888 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3892 op
= 0xffff << (reg
- 1);
3894 && ((mask
& op
) == (1u << (reg
- 1))))
3896 op
= (1 << (reg
+ i
+ 1)) - 1;
3897 op
&= ~((1 << reg
) - 1);
3899 unwind
.opcode_count
-= 2;
3906 /* We want to generate opcodes in the order the registers have been
3907 saved, ie. descending order. */
3908 for (reg
= 15; reg
>= -1; reg
--)
3910 /* Save registers in blocks. */
3912 || !(mask
& (1 << reg
)))
3914 /* We found an unsaved reg. Generate opcodes to save the
3921 op
= 0xc0 | (hi_reg
- 10);
3922 add_unwind_opcode (op
, 1);
3927 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3928 add_unwind_opcode (op
, 2);
3937 ignore_rest_of_line ();
3941 s_arm_unwind_save_mmxwcg (void)
3948 if (*input_line_pointer
== '{')
3949 input_line_pointer
++;
3953 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3957 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3963 as_tsktsk (_("register list not in ascending order"));
3966 if (*input_line_pointer
== '-')
3968 input_line_pointer
++;
3969 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3972 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3975 else if (reg
>= hi_reg
)
3977 as_bad (_("bad register range"));
3980 for (; reg
< hi_reg
; reg
++)
3984 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3986 if (*input_line_pointer
== '}')
3987 input_line_pointer
++;
3989 demand_empty_rest_of_line ();
3991 /* Generate any deferred opcodes because we're going to be looking at
3993 flush_pending_unwind ();
3995 for (reg
= 0; reg
< 16; reg
++)
3997 if (mask
& (1 << reg
))
3998 unwind
.frame_size
+= 4;
4001 add_unwind_opcode (op
, 2);
4004 ignore_rest_of_line ();
4008 /* Parse an unwind_save directive.
4009 If the argument is non-zero, this is a .vsave directive. */
4012 s_arm_unwind_save (int arch_v6
)
4015 struct reg_entry
*reg
;
4016 bfd_boolean had_brace
= FALSE
;
4018 if (!unwind
.proc_start
)
4019 as_bad (MISSING_FNSTART
);
4021 /* Figure out what sort of save we have. */
4022 peek
= input_line_pointer
;
4030 reg
= arm_reg_parse_multi (&peek
);
4034 as_bad (_("register expected"));
4035 ignore_rest_of_line ();
4044 as_bad (_("FPA .unwind_save does not take a register list"));
4045 ignore_rest_of_line ();
4048 input_line_pointer
= peek
;
4049 s_arm_unwind_save_fpa (reg
->number
);
4052 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
4055 s_arm_unwind_save_vfp_armv6 ();
4057 s_arm_unwind_save_vfp ();
4059 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
4060 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
4063 as_bad (_(".unwind_save does not support this kind of register"));
4064 ignore_rest_of_line ();
4069 /* Parse an unwind_movsp directive. */
4072 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4078 if (!unwind
.proc_start
)
4079 as_bad (MISSING_FNSTART
);
4081 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4084 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4085 ignore_rest_of_line ();
4089 /* Optional constant. */
4090 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4092 if (immediate_for_directive (&offset
) == FAIL
)
4098 demand_empty_rest_of_line ();
4100 if (reg
== REG_SP
|| reg
== REG_PC
)
4102 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4106 if (unwind
.fp_reg
!= REG_SP
)
4107 as_bad (_("unexpected .unwind_movsp directive"));
4109 /* Generate opcode to restore the value. */
4111 add_unwind_opcode (op
, 1);
4113 /* Record the information for later. */
4114 unwind
.fp_reg
= reg
;
4115 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4116 unwind
.sp_restored
= 1;
4119 /* Parse an unwind_pad directive. */
4122 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4126 if (!unwind
.proc_start
)
4127 as_bad (MISSING_FNSTART
);
4129 if (immediate_for_directive (&offset
) == FAIL
)
4134 as_bad (_("stack increment must be multiple of 4"));
4135 ignore_rest_of_line ();
4139 /* Don't generate any opcodes, just record the details for later. */
4140 unwind
.frame_size
+= offset
;
4141 unwind
.pending_offset
+= offset
;
4143 demand_empty_rest_of_line ();
4146 /* Parse an unwind_setfp directive. */
4149 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4155 if (!unwind
.proc_start
)
4156 as_bad (MISSING_FNSTART
);
4158 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4159 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4162 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4164 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4166 as_bad (_("expected <reg>, <reg>"));
4167 ignore_rest_of_line ();
4171 /* Optional constant. */
4172 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4174 if (immediate_for_directive (&offset
) == FAIL
)
4180 demand_empty_rest_of_line ();
4182 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4184 as_bad (_("register must be either sp or set by a previous"
4185 "unwind_movsp directive"));
4189 /* Don't generate any opcodes, just record the information for later. */
4190 unwind
.fp_reg
= fp_reg
;
4192 if (sp_reg
== REG_SP
)
4193 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4195 unwind
.fp_offset
-= offset
;
4198 /* Parse an unwind_raw directive. */
4201 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4204 /* This is an arbitrary limit. */
4205 unsigned char op
[16];
4208 if (!unwind
.proc_start
)
4209 as_bad (MISSING_FNSTART
);
4212 if (exp
.X_op
== O_constant
4213 && skip_past_comma (&input_line_pointer
) != FAIL
)
4215 unwind
.frame_size
+= exp
.X_add_number
;
4219 exp
.X_op
= O_illegal
;
4221 if (exp
.X_op
!= O_constant
)
4223 as_bad (_("expected <offset>, <opcode>"));
4224 ignore_rest_of_line ();
4230 /* Parse the opcode. */
4235 as_bad (_("unwind opcode too long"));
4236 ignore_rest_of_line ();
4238 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4240 as_bad (_("invalid unwind opcode"));
4241 ignore_rest_of_line ();
4244 op
[count
++] = exp
.X_add_number
;
4246 /* Parse the next byte. */
4247 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4253 /* Add the opcode bytes in reverse order. */
4255 add_unwind_opcode (op
[count
], 1);
4257 demand_empty_rest_of_line ();
4261 /* Parse a .eabi_attribute directive. */
4264 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4266 int tag
= s_vendor_attribute (OBJ_ATTR_PROC
);
4268 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4269 attributes_set_explicitly
[tag
] = 1;
4271 #endif /* OBJ_ELF */
4273 static void s_arm_arch (int);
4274 static void s_arm_object_arch (int);
4275 static void s_arm_cpu (int);
4276 static void s_arm_fpu (int);
4277 static void s_arm_arch_extension (int);
4282 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4289 if (exp
.X_op
== O_symbol
)
4290 exp
.X_op
= O_secrel
;
4292 emit_expr (&exp
, 4);
4294 while (*input_line_pointer
++ == ',');
4296 input_line_pointer
--;
4297 demand_empty_rest_of_line ();
4301 /* This table describes all the machine specific pseudo-ops the assembler
4302 has to support. The fields are:
4303 pseudo-op name without dot
4304 function to call to execute this pseudo-op
4305 Integer arg to pass to the function. */
/* NOTE(review): this table was damaged by text extraction -- several
   entries, the conditional (#ifdef OBJ_ELF/OBJ_COFF) arms, the
   terminating sentinel entry and the closing brace are missing from
   this view.  Do not treat the entries below as the complete set;
   verify against the upstream source before editing.  */
4307 const pseudo_typeS md_pseudo_table
[] =
4309 /* Never called because '.req' does not start a line. */
4310 { "req", s_req
, 0 },
4311 /* Following two are likewise never called. */
4314 { "unreq", s_unreq
, 0 },
4315 { "bss", s_bss
, 0 },
4316 { "align", s_align
, 0 },
4317 { "arm", s_arm
, 0 },
4318 { "thumb", s_thumb
, 0 },
4319 { "code", s_code
, 0 },
4320 { "force_thumb", s_force_thumb
, 0 },
4321 { "thumb_func", s_thumb_func
, 0 },
4322 { "thumb_set", s_thumb_set
, 0 },
4323 { "even", s_even
, 0 },
4324 { "ltorg", s_ltorg
, 0 },
4325 { "pool", s_ltorg
, 0 },
4326 { "syntax", s_syntax
, 0 },
4327 { "cpu", s_arm_cpu
, 0 },
4328 { "arch", s_arm_arch
, 0 },
4329 { "object_arch", s_arm_object_arch
, 0 },
4330 { "fpu", s_arm_fpu
, 0 },
4331 { "arch_extension", s_arm_arch_extension
, 0 },
4333 { "word", s_arm_elf_cons
, 4 },
4334 { "long", s_arm_elf_cons
, 4 },
4335 { "inst.n", s_arm_elf_inst
, 2 },
4336 { "inst.w", s_arm_elf_inst
, 4 },
4337 { "inst", s_arm_elf_inst
, 0 },
4338 { "rel31", s_arm_rel31
, 0 },
/* AEABI stack-unwinding directives (see the handlers above).  */
4339 { "fnstart", s_arm_unwind_fnstart
, 0 },
4340 { "fnend", s_arm_unwind_fnend
, 0 },
4341 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4342 { "personality", s_arm_unwind_personality
, 0 },
4343 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4344 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4345 { "save", s_arm_unwind_save
, 0 },
4346 { "vsave", s_arm_unwind_save
, 1 },
4347 { "movsp", s_arm_unwind_movsp
, 0 },
4348 { "pad", s_arm_unwind_pad
, 0 },
4349 { "setfp", s_arm_unwind_setfp
, 0 },
4350 { "unwind_raw", s_arm_unwind_raw
, 0 },
4351 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4355 /* These are used for dwarf. */
4359 /* These are used for dwarf2. */
4360 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4361 { "loc", dwarf2_directive_loc
, 0 },
4362 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4364 { "extend", float_cons
, 'x' },
4365 { "ldouble", float_cons
, 'x' },
4366 { "packed", float_cons
, 'p' },
4368 {"secrel32", pe_directive_secrel
, 0},
4373 /* Parser functions used exclusively in instruction operands. */
4375 /* Generic immediate-value read function for use in insn parsing.
4376 STR points to the beginning of the immediate (the leading #);
4377 VAL receives the value; if the value is outside [MIN, MAX]
4378 issue an error. PREFIX_OPT is true if the immediate prefix is
4382 parse_immediate (char **str
, int *val
, int min
, int max
,
4383 bfd_boolean prefix_opt
)
4386 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4387 if (exp
.X_op
!= O_constant
)
4389 inst
.error
= _("constant expression required");
4393 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4395 inst
.error
= _("immediate value out of range");
4399 *val
= exp
.X_add_number
;
4403 /* Less-generic immediate-value read function with the possibility of loading a
4404 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4405 instructions. Puts the result directly in inst.operands[i]. */
4408 parse_big_immediate (char **str
, int i
)
4413 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
4415 if (exp
.X_op
== O_constant
)
4417 inst
.operands
[i
].imm
= exp
.X_add_number
& 0xffffffff;
4418 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4419 O_constant. We have to be careful not to break compilation for
4420 32-bit X_add_number, though. */
4421 if ((exp
.X_add_number
& ~0xffffffffl
) != 0)
4423 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4424 inst
.operands
[i
].reg
= ((exp
.X_add_number
>> 16) >> 16) & 0xffffffff;
4425 inst
.operands
[i
].regisimm
= 1;
4428 else if (exp
.X_op
== O_big
4429 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32)
4431 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4433 /* Bignums have their least significant bits in
4434 generic_bignum[0]. Make sure we put 32 bits in imm and
4435 32 bits in reg, in a (hopefully) portable way. */
4436 gas_assert (parts
!= 0);
4438 /* Make sure that the number is not too big.
4439 PR 11972: Bignums can now be sign-extended to the
4440 size of a .octa so check that the out of range bits
4441 are all zero or all one. */
4442 if (LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 64)
4444 LITTLENUM_TYPE m
= -1;
4446 if (generic_bignum
[parts
* 2] != 0
4447 && generic_bignum
[parts
* 2] != m
)
4450 for (j
= parts
* 2 + 1; j
< (unsigned) exp
.X_add_number
; j
++)
4451 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4455 inst
.operands
[i
].imm
= 0;
4456 for (j
= 0; j
< parts
; j
++, idx
++)
4457 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4458 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4459 inst
.operands
[i
].reg
= 0;
4460 for (j
= 0; j
< parts
; j
++, idx
++)
4461 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4462 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4463 inst
.operands
[i
].regisimm
= 1;
4473 /* Returns the pseudo-register number of an FPA immediate constant,
4474 or FAIL if there isn't a valid constant here. */
4477 parse_fpa_immediate (char ** str
)
4479 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4485 /* First try and match exact strings, this is to guarantee
4486 that some formats will work even for cross assembly. */
4488 for (i
= 0; fp_const
[i
]; i
++)
4490 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4494 *str
+= strlen (fp_const
[i
]);
4495 if (is_end_of_line
[(unsigned char) **str
])
4501 /* Just because we didn't get a match doesn't mean that the constant
4502 isn't valid, just that it is in a format that we don't
4503 automatically recognize. Try parsing it with the standard
4504 expression routines. */
4506 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4508 /* Look for a raw floating point number. */
4509 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4510 && is_end_of_line
[(unsigned char) *save_in
])
4512 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4514 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4516 if (words
[j
] != fp_values
[i
][j
])
4520 if (j
== MAX_LITTLENUMS
)
4528 /* Try and parse a more complex expression, this will probably fail
4529 unless the code uses a floating point prefix (eg "0f"). */
4530 save_in
= input_line_pointer
;
4531 input_line_pointer
= *str
;
4532 if (expression (&exp
) == absolute_section
4533 && exp
.X_op
== O_big
4534 && exp
.X_add_number
< 0)
4536 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4538 if (gen_to_words (words
, 5, (long) 15) == 0)
4540 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4542 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4544 if (words
[j
] != fp_values
[i
][j
])
4548 if (j
== MAX_LITTLENUMS
)
4550 *str
= input_line_pointer
;
4551 input_line_pointer
= save_in
;
4558 *str
= input_line_pointer
;
4559 input_line_pointer
= save_in
;
4560 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.
   That is: the low 19 bits are zero, and bits 30..25 are either all the
   complement of bit 29 (the "Bbbbbb" run), checked via BS below.  */

static int
is_quarter_float (unsigned imm)
{
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
4574 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4575 0baBbbbbbc defgh000 00000000 00000000.
4576 The zero and minus-zero cases need special handling, since they can't be
4577 encoded in the "quarter-precision" float format, but can nonetheless be
4578 loaded as integer constants. */
4581 parse_qfloat_immediate (char **ccp
, int *immed
)
4585 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4586 int found_fpchar
= 0;
4588 skip_past_char (&str
, '#');
4590 /* We must not accidentally parse an integer as a floating-point number. Make
4591 sure that the value we parse is not an integer by checking for special
4592 characters '.' or 'e'.
4593 FIXME: This is a horrible hack, but doing better is tricky because type
4594 information isn't in a very usable state at parse time. */
4596 skip_whitespace (fpnum
);
4598 if (strncmp (fpnum
, "0x", 2) == 0)
4602 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4603 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
4613 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4615 unsigned fpword
= 0;
4618 /* Our FP word must be 32 bits (single-precision FP). */
4619 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4621 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4625 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift name to its kind, for the arm_shift_hsh table.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE		/* Shift must be LSL immediate.  */
};
4660 /* Parse a <shift> specifier on an ARM data processing instruction.
4661 This has three forms:
4663 (LSL|LSR|ASL|ASR|ROR) Rs
4664 (LSL|LSR|ASL|ASR|ROR) #imm
4667 Note that ASL is assimilated to LSL in the instruction encoding, and
4668 RRX to ROR #0 (which cannot be written as such). */
4671 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4673 const struct asm_shift_name
*shift_name
;
4674 enum shift_kind shift
;
4679 for (p
= *str
; ISALPHA (*p
); p
++)
4684 inst
.error
= _("shift expression expected");
4688 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
4691 if (shift_name
== NULL
)
4693 inst
.error
= _("shift expression expected");
4697 shift
= shift_name
->kind
;
4701 case NO_SHIFT_RESTRICT
:
4702 case SHIFT_IMMEDIATE
: break;
4704 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4705 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4707 inst
.error
= _("'LSL' or 'ASR' required");
4712 case SHIFT_LSL_IMMEDIATE
:
4713 if (shift
!= SHIFT_LSL
)
4715 inst
.error
= _("'LSL' required");
4720 case SHIFT_ASR_IMMEDIATE
:
4721 if (shift
!= SHIFT_ASR
)
4723 inst
.error
= _("'ASR' required");
4731 if (shift
!= SHIFT_RRX
)
4733 /* Whitespace can appear here if the next thing is a bare digit. */
4734 skip_whitespace (p
);
4736 if (mode
== NO_SHIFT_RESTRICT
4737 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4739 inst
.operands
[i
].imm
= reg
;
4740 inst
.operands
[i
].immisreg
= 1;
4742 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4745 inst
.operands
[i
].shift_kind
= shift
;
4746 inst
.operands
[i
].shifted
= 1;
4751 /* Parse a <shifter_operand> for an ARM data processing instruction:
4754 #<immediate>, <rotate>
4758 where <shift> is defined by parse_shift above, and <rotate> is a
4759 multiple of 2 between 0 and 30. Validation of immediate operands
4760 is deferred to md_apply_fix. */
4763 parse_shifter_operand (char **str
, int i
)
4768 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4770 inst
.operands
[i
].reg
= value
;
4771 inst
.operands
[i
].isreg
= 1;
4773 /* parse_shift will override this if appropriate */
4774 inst
.reloc
.exp
.X_op
= O_constant
;
4775 inst
.reloc
.exp
.X_add_number
= 0;
4777 if (skip_past_comma (str
) == FAIL
)
4780 /* Shift operation on register. */
4781 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4784 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4787 if (skip_past_comma (str
) == SUCCESS
)
4789 /* #x, y -- ie explicit rotation by Y. */
4790 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
4793 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
4795 inst
.error
= _("constant expression expected");
4799 value
= exp
.X_add_number
;
4800 if (value
< 0 || value
> 30 || value
% 2 != 0)
4802 inst
.error
= _("invalid rotation");
4805 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
4807 inst
.error
= _("invalid constant");
4811 /* Convert to decoded value. md_apply_fix will put it back. */
4812 inst
.reloc
.exp
.X_add_number
4813 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
4814 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
4817 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
4818 inst
.reloc
.pc_rel
= 0;
4822 /* Group relocation information. Each entry in the table contains the
4823 textual name of the relocation as may appear in assembler source
4824 and must end with a colon.
4825 Along with this textual name are the relocation codes to be used if
4826 the corresponding instruction is an ALU instruction (ADD or SUB only),
4827 an LDR, an LDRS, or an LDC. */
4829 struct group_reloc_table_entry
4840 /* Varieties of non-ALU group relocation. */
4847 static struct group_reloc_table_entry group_reloc_table
[] =
4848 { /* Program counter relative: */
4850 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
4855 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
4856 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
4857 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
4858 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
4860 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
4865 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
4866 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
4867 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
4868 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
4870 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
4871 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
4872 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
4873 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
4874 /* Section base relative */
4876 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
4881 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
4882 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
4883 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
4884 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
4886 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
4891 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
4892 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
4893 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
4894 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
4896 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
4897 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
4898 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
4899 BFD_RELOC_ARM_LDC_SB_G2
} }; /* LDC */
4901 /* Given the address of a pointer pointing to the textual name of a group
4902 relocation as may appear in assembler source, attempt to find its details
4903 in group_reloc_table. The pointer will be updated to the character after
4904 the trailing colon. On failure, FAIL will be returned; SUCCESS
4905 otherwise. On success, *entry will be updated to point at the relevant
4906 group_reloc_table entry. */
4909 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
4912 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
4914 int length
= strlen (group_reloc_table
[i
].name
);
4916 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
4917 && (*str
)[length
] == ':')
4919 *out
= &group_reloc_table
[i
];
4920 *str
+= (length
+ 1);
4928 /* Parse a <shifter_operand> for an ARM data processing instruction
4929 (as for parse_shifter_operand) where group relocations are allowed:
4932 #<immediate>, <rotate>
4933 #:<group_reloc>:<expression>
4937 where <group_reloc> is one of the strings defined in group_reloc_table.
4938 The hashes are optional.
4940 Everything else is as for parse_shifter_operand. */
4942 static parse_operand_result
4943 parse_shifter_operand_group_reloc (char **str
, int i
)
4945 /* Determine if we have the sequence of characters #: or just :
4946 coming next. If we do, then we check for a group relocation.
4947 If we don't, punt the whole lot to parse_shifter_operand. */
4949 if (((*str
)[0] == '#' && (*str
)[1] == ':')
4950 || (*str
)[0] == ':')
4952 struct group_reloc_table_entry
*entry
;
4954 if ((*str
)[0] == '#')
4959 /* Try to parse a group relocation. Anything else is an error. */
4960 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
4962 inst
.error
= _("unknown group relocation");
4963 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4966 /* We now have the group relocation table entry corresponding to
4967 the name in the assembler source. Next, we parse the expression. */
4968 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
4969 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4971 /* Record the relocation type (always the ALU variant here). */
4972 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
4973 gas_assert (inst
.reloc
.type
!= 0);
4975 return PARSE_OPERAND_SUCCESS
;
4978 return parse_shifter_operand (str
, i
) == SUCCESS
4979 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
4981 /* Never reached. */
4984 /* Parse a Neon alignment expression. Information is written to
4985 inst.operands[i]. We assume the initial ':' has been skipped.
4987 align .imm = align << 8, .immisalign=1, .preind=0 */
4988 static parse_operand_result
4989 parse_neon_alignment (char **str
, int i
)
4994 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
4996 if (exp
.X_op
!= O_constant
)
4998 inst
.error
= _("alignment must be constant");
4999 return PARSE_OPERAND_FAIL
;
5002 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5003 inst
.operands
[i
].immisalign
= 1;
5004 /* Alignments are not pre-indexes. */
5005 inst
.operands
[i
].preind
= 0;
5008 return PARSE_OPERAND_SUCCESS
;
5011 /* Parse all forms of an ARM address expression. Information is written
5012 to inst.operands[i] and/or inst.reloc.
5014 Preindexed addressing (.preind=1):
5016 [Rn, #offset] .reg=Rn .reloc.exp=offset
5017 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5018 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5019 .shift_kind=shift .reloc.exp=shift_imm
5021 These three may have a trailing ! which causes .writeback to be set also.
5023 Postindexed addressing (.postind=1, .writeback=1):
5025 [Rn], #offset .reg=Rn .reloc.exp=offset
5026 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5027 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5028 .shift_kind=shift .reloc.exp=shift_imm
5030 Unindexed addressing (.preind=0, .postind=0):
5032 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5036 [Rn]{!} shorthand for [Rn,#0]{!}
5037 =immediate .isreg=0 .reloc.exp=immediate
5038 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5040 It is the caller's responsibility to check for addressing modes not
5041 supported by the instruction, and to set inst.reloc.type. */
5043 static parse_operand_result
5044 parse_address_main (char **str
, int i
, int group_relocations
,
5045 group_reloc_type group_type
)
5050 if (skip_past_char (&p
, '[') == FAIL
)
5052 if (skip_past_char (&p
, '=') == FAIL
)
5054 /* Bare address - translate to PC-relative offset. */
5055 inst
.reloc
.pc_rel
= 1;
5056 inst
.operands
[i
].reg
= REG_PC
;
5057 inst
.operands
[i
].isreg
= 1;
5058 inst
.operands
[i
].preind
= 1;
5060 /* Otherwise a load-constant pseudo op, no special treatment needed here. */
5062 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5063 return PARSE_OPERAND_FAIL
;
5066 return PARSE_OPERAND_SUCCESS
;
5069 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5071 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5072 return PARSE_OPERAND_FAIL
;
5074 inst
.operands
[i
].reg
= reg
;
5075 inst
.operands
[i
].isreg
= 1;
5077 if (skip_past_comma (&p
) == SUCCESS
)
5079 inst
.operands
[i
].preind
= 1;
5082 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5084 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5086 inst
.operands
[i
].imm
= reg
;
5087 inst
.operands
[i
].immisreg
= 1;
5089 if (skip_past_comma (&p
) == SUCCESS
)
5090 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5091 return PARSE_OPERAND_FAIL
;
5093 else if (skip_past_char (&p
, ':') == SUCCESS
)
5095 /* FIXME: '@' should be used here, but it's filtered out by generic
5096 code before we get to see it here. This may be subject to
5098 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5100 if (result
!= PARSE_OPERAND_SUCCESS
)
5105 if (inst
.operands
[i
].negative
)
5107 inst
.operands
[i
].negative
= 0;
5111 if (group_relocations
5112 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5114 struct group_reloc_table_entry
*entry
;
5116 /* Skip over the #: or : sequence. */
5122 /* Try to parse a group relocation. Anything else is an
5124 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5126 inst
.error
= _("unknown group relocation");
5127 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5130 /* We now have the group relocation table entry corresponding to
5131 the name in the assembler source. Next, we parse the
5133 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5134 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5136 /* Record the relocation type. */
5140 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5144 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5148 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5155 if (inst
.reloc
.type
== 0)
5157 inst
.error
= _("this group relocation is not allowed on this instruction");
5158 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5162 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5163 return PARSE_OPERAND_FAIL
;
5166 else if (skip_past_char (&p
, ':') == SUCCESS
)
5168 /* FIXME: '@' should be used here, but it's filtered out by generic code
5169 before we get to see it here. This may be subject to change. */
5170 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5172 if (result
!= PARSE_OPERAND_SUCCESS
)
5176 if (skip_past_char (&p
, ']') == FAIL
)
5178 inst
.error
= _("']' expected");
5179 return PARSE_OPERAND_FAIL
;
5182 if (skip_past_char (&p
, '!') == SUCCESS
)
5183 inst
.operands
[i
].writeback
= 1;
5185 else if (skip_past_comma (&p
) == SUCCESS
)
5187 if (skip_past_char (&p
, '{') == SUCCESS
)
5189 /* [Rn], {expr} - unindexed, with option */
5190 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5191 0, 255, TRUE
) == FAIL
)
5192 return PARSE_OPERAND_FAIL
;
5194 if (skip_past_char (&p
, '}') == FAIL
)
5196 inst
.error
= _("'}' expected at end of 'option' field");
5197 return PARSE_OPERAND_FAIL
;
5199 if (inst
.operands
[i
].preind
)
5201 inst
.error
= _("cannot combine index with option");
5202 return PARSE_OPERAND_FAIL
;
5205 return PARSE_OPERAND_SUCCESS
;
5209 inst
.operands
[i
].postind
= 1;
5210 inst
.operands
[i
].writeback
= 1;
5212 if (inst
.operands
[i
].preind
)
5214 inst
.error
= _("cannot combine pre- and post-indexing");
5215 return PARSE_OPERAND_FAIL
;
5219 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5221 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5223 /* We might be using the immediate for alignment already. If we
5224 are, OR the register number into the low-order bits. */
5225 if (inst
.operands
[i
].immisalign
)
5226 inst
.operands
[i
].imm
|= reg
;
5228 inst
.operands
[i
].imm
= reg
;
5229 inst
.operands
[i
].immisreg
= 1;
5231 if (skip_past_comma (&p
) == SUCCESS
)
5232 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5233 return PARSE_OPERAND_FAIL
;
5237 if (inst
.operands
[i
].negative
)
5239 inst
.operands
[i
].negative
= 0;
5242 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5243 return PARSE_OPERAND_FAIL
;
5248 /* If at this point neither .preind nor .postind is set, we have a
5249 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5250 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5252 inst
.operands
[i
].preind
= 1;
5253 inst
.reloc
.exp
.X_op
= O_constant
;
5254 inst
.reloc
.exp
.X_add_number
= 0;
5257 return PARSE_OPERAND_SUCCESS
;
5261 parse_address (char **str
, int i
)
5263 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5267 static parse_operand_result
5268 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5270 return parse_address_main (str
, i
, 1, type
);
5273 /* Parse an operand for a MOVW or MOVT instruction. */
5275 parse_half (char **str
)
5280 skip_past_char (&p
, '#');
5281 if (strncasecmp (p
, ":lower16:", 9) == 0)
5282 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5283 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5284 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5286 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5289 skip_whitespace (p
);
5292 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5295 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5297 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5299 inst
.error
= _("constant expression expected");
5302 if (inst
.reloc
.exp
.X_add_number
< 0
5303 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5305 inst
.error
= _("immediate value out of range");
5313 /* Miscellaneous. */
5315 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5316 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5318 parse_psr (char **str
)
5321 unsigned long psr_field
;
5322 const struct asm_psr
*psr
;
5325 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5326 feature for ease of use and backwards compatibility. */
5328 if (strncasecmp (p
, "SPSR", 4) == 0)
5329 psr_field
= SPSR_BIT
;
5330 else if (strncasecmp (p
, "CPSR", 4) == 0
5331 || (strncasecmp (p
, "APSR", 4) == 0
5332 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
)))
5339 while (ISALNUM (*p
) || *p
== '_');
5341 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5353 /* A suffix follows. */
5359 while (ISALNUM (*p
) || *p
== '_');
5361 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5366 psr_field
|= psr
->field
;
5371 goto error
; /* Garbage after "[CS]PSR". */
5373 psr_field
|= (PSR_c
| PSR_f
);
5379 inst
.error
= _("flag for {c}psr instruction expected");
5383 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5384 value suitable for splatting into the AIF field of the instruction. */
5387 parse_cps_flags (char **str
)
5396 case '\0': case ',':
5399 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
5400 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
5401 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
5404 inst
.error
= _("unrecognized CPS flag");
5409 if (saw_a_flag
== 0)
5411 inst
.error
= _("missing CPS flags");
5419 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5420 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5423 parse_endian_specifier (char **str
)
5428 if (strncasecmp (s
, "BE", 2))
5430 else if (strncasecmp (s
, "LE", 2))
5434 inst
.error
= _("valid endian specifiers are be or le");
5438 if (ISALNUM (s
[2]) || s
[2] == '_')
5440 inst
.error
= _("valid endian specifiers are be or le");
5445 return little_endian
;
5448 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5449 value suitable for poking into the rotate field of an sxt or sxta
5450 instruction, or FAIL on error. */
5453 parse_ror (char **str
)
5458 if (strncasecmp (s
, "ROR", 3) == 0)
5462 inst
.error
= _("missing rotation field after comma");
5466 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
5471 case 0: *str
= s
; return 0x0;
5472 case 8: *str
= s
; return 0x1;
5473 case 16: *str
= s
; return 0x2;
5474 case 24: *str
= s
; return 0x3;
5477 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
5482 /* Parse a conditional code (from conds[] below). The value returned is in the
5483 range 0 .. 14, or FAIL. */
5485 parse_cond (char **str
)
5488 const struct asm_cond
*c
;
5490 /* Condition codes are always 2 characters, so matching up to
5491 3 characters is sufficient. */
5496 while (ISALPHA (*q
) && n
< 3)
5498 cond
[n
] = TOLOWER (*q
);
5503 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
5506 inst
.error
= _("condition required");
5514 /* Parse an option for a barrier instruction. Returns the encoding for the
5517 parse_barrier (char **str
)
5520 const struct asm_barrier_opt
*o
;
5523 while (ISALPHA (*q
))
5526 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
5535 /* Parse the operands of a table branch instruction. Similar to a memory
5538 parse_tb (char **str
)
5543 if (skip_past_char (&p
, '[') == FAIL
)
5545 inst
.error
= _("'[' expected");
5549 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5551 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5554 inst
.operands
[0].reg
= reg
;
5556 if (skip_past_comma (&p
) == FAIL
)
5558 inst
.error
= _("',' expected");
5562 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5564 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5567 inst
.operands
[0].imm
= reg
;
5569 if (skip_past_comma (&p
) == SUCCESS
)
5571 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
5573 if (inst
.reloc
.exp
.X_add_number
!= 1)
5575 inst
.error
= _("invalid shift");
5578 inst
.operands
[0].shifted
= 1;
5581 if (skip_past_char (&p
, ']') == FAIL
)
5583 inst
.error
= _("']' expected");
5590 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5591 information on the types the operands can take and how they are encoded.
5592 Up to four operands may be read; this function handles setting the
5593 ".present" field for each read operand itself.
5594 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5595 else returns FAIL. */
5598 parse_neon_mov (char **str
, int *which_operand
)
5600 int i
= *which_operand
, val
;
5601 enum arm_reg_type rtype
;
5603 struct neon_type_el optype
;
5605 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
5607 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5608 inst
.operands
[i
].reg
= val
;
5609 inst
.operands
[i
].isscalar
= 1;
5610 inst
.operands
[i
].vectype
= optype
;
5611 inst
.operands
[i
++].present
= 1;
5613 if (skip_past_comma (&ptr
) == FAIL
)
5616 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5619 inst
.operands
[i
].reg
= val
;
5620 inst
.operands
[i
].isreg
= 1;
5621 inst
.operands
[i
].present
= 1;
5623 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
5626 /* Cases 0, 1, 2, 3, 5 (D only). */
5627 if (skip_past_comma (&ptr
) == FAIL
)
5630 inst
.operands
[i
].reg
= val
;
5631 inst
.operands
[i
].isreg
= 1;
5632 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
5633 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5634 inst
.operands
[i
].isvec
= 1;
5635 inst
.operands
[i
].vectype
= optype
;
5636 inst
.operands
[i
++].present
= 1;
5638 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5640 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5641 Case 13: VMOV <Sd>, <Rm> */
5642 inst
.operands
[i
].reg
= val
;
5643 inst
.operands
[i
].isreg
= 1;
5644 inst
.operands
[i
].present
= 1;
5646 if (rtype
== REG_TYPE_NQ
)
5648 first_error (_("can't use Neon quad register here"));
5651 else if (rtype
!= REG_TYPE_VFS
)
5654 if (skip_past_comma (&ptr
) == FAIL
)
5656 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5658 inst
.operands
[i
].reg
= val
;
5659 inst
.operands
[i
].isreg
= 1;
5660 inst
.operands
[i
].present
= 1;
5663 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
5666 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5667 Case 1: VMOV<c><q> <Dd>, <Dm>
5668 Case 8: VMOV.F32 <Sd>, <Sm>
5669 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5671 inst
.operands
[i
].reg
= val
;
5672 inst
.operands
[i
].isreg
= 1;
5673 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
5674 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5675 inst
.operands
[i
].isvec
= 1;
5676 inst
.operands
[i
].vectype
= optype
;
5677 inst
.operands
[i
].present
= 1;
5679 if (skip_past_comma (&ptr
) == SUCCESS
)
5684 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5687 inst
.operands
[i
].reg
= val
;
5688 inst
.operands
[i
].isreg
= 1;
5689 inst
.operands
[i
++].present
= 1;
5691 if (skip_past_comma (&ptr
) == FAIL
)
5694 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5697 inst
.operands
[i
].reg
= val
;
5698 inst
.operands
[i
].isreg
= 1;
5699 inst
.operands
[i
++].present
= 1;
5702 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
5703 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5704 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5705 Case 10: VMOV.F32 <Sd>, #<imm>
5706 Case 11: VMOV.F64 <Dd>, #<imm> */
5707 inst
.operands
[i
].immisfloat
= 1;
5708 else if (parse_big_immediate (&ptr
, i
) == SUCCESS
)
5709 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5710 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5714 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5718 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5721 inst
.operands
[i
].reg
= val
;
5722 inst
.operands
[i
].isreg
= 1;
5723 inst
.operands
[i
++].present
= 1;
5725 if (skip_past_comma (&ptr
) == FAIL
)
5728 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
5730 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5731 inst
.operands
[i
].reg
= val
;
5732 inst
.operands
[i
].isscalar
= 1;
5733 inst
.operands
[i
].present
= 1;
5734 inst
.operands
[i
].vectype
= optype
;
5736 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5738 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5739 inst
.operands
[i
].reg
= val
;
5740 inst
.operands
[i
].isreg
= 1;
5741 inst
.operands
[i
++].present
= 1;
5743 if (skip_past_comma (&ptr
) == FAIL
)
5746 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
5749 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
5753 inst
.operands
[i
].reg
= val
;
5754 inst
.operands
[i
].isreg
= 1;
5755 inst
.operands
[i
].isvec
= 1;
5756 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5757 inst
.operands
[i
].vectype
= optype
;
5758 inst
.operands
[i
].present
= 1;
5760 if (rtype
== REG_TYPE_VFS
)
5764 if (skip_past_comma (&ptr
) == FAIL
)
5766 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
5769 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
5772 inst
.operands
[i
].reg
= val
;
5773 inst
.operands
[i
].isreg
= 1;
5774 inst
.operands
[i
].isvec
= 1;
5775 inst
.operands
[i
].issingle
= 1;
5776 inst
.operands
[i
].vectype
= optype
;
5777 inst
.operands
[i
].present
= 1;
5780 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
5784 inst
.operands
[i
].reg
= val
;
5785 inst
.operands
[i
].isreg
= 1;
5786 inst
.operands
[i
].isvec
= 1;
5787 inst
.operands
[i
].issingle
= 1;
5788 inst
.operands
[i
].vectype
= optype
;
5789 inst
.operands
[i
++].present
= 1;
5794 first_error (_("parse error"));
5798 /* Successfully parsed the operands. Update args. */
5804 first_error (_("expected comma"));
5808 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd): the ARM constraint occupies the
   low 16 bits and the Thumb constraint the high 16 bits.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
5817 /* Matcher codes for parse_operands. */
5818 enum operand_parse_code
5820 OP_stop
, /* end of line */
5822 OP_RR
, /* ARM register */
5823 OP_RRnpc
, /* ARM register, not r15 */
5824 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
5825 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
5826 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
5827 optional trailing ! */
5828 OP_RRw
, /* ARM register, not r15, optional trailing ! */
5829 OP_RCP
, /* Coprocessor number */
5830 OP_RCN
, /* Coprocessor register */
5831 OP_RF
, /* FPA register */
5832 OP_RVS
, /* VFP single precision register */
5833 OP_RVD
, /* VFP double precision register (0..15) */
5834 OP_RND
, /* Neon double precision register (0..31) */
5835 OP_RNQ
, /* Neon quad precision register */
5836 OP_RVSD
, /* VFP single or double precision register */
5837 OP_RNDQ
, /* Neon double or quad precision register */
5838 OP_RNSDQ
, /* Neon single, double or quad precision register */
5839 OP_RNSC
, /* Neon scalar D[X] */
5840 OP_RVC
, /* VFP control register */
5841 OP_RMF
, /* Maverick F register */
5842 OP_RMD
, /* Maverick D register */
5843 OP_RMFX
, /* Maverick FX register */
5844 OP_RMDX
, /* Maverick DX register */
5845 OP_RMAX
, /* Maverick AX register */
5846 OP_RMDS
, /* Maverick DSPSC register */
5847 OP_RIWR
, /* iWMMXt wR register */
5848 OP_RIWC
, /* iWMMXt wC register */
5849 OP_RIWG
, /* iWMMXt wCG register */
5850 OP_RXA
, /* XScale accumulator register */
5852 OP_REGLST
, /* ARM register list */
5853 OP_VRSLST
, /* VFP single-precision register list */
5854 OP_VRDLST
, /* VFP double-precision register list */
5855 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
5856 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
5857 OP_NSTRLST
, /* Neon element/structure list */
5859 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
5860 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
5861 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
5862 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
5863 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
5864 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
5865 OP_VMOV
, /* Neon VMOV operands. */
5866 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
5867 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
5868 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
5870 OP_I0
, /* immediate zero */
5871 OP_I7
, /* immediate value 0 .. 7 */
5872 OP_I15
, /* 0 .. 15 */
5873 OP_I16
, /* 1 .. 16 */
5874 OP_I16z
, /* 0 .. 16 */
5875 OP_I31
, /* 0 .. 31 */
5876 OP_I31w
, /* 0 .. 31, optional trailing ! */
5877 OP_I32
, /* 1 .. 32 */
5878 OP_I32z
, /* 0 .. 32 */
5879 OP_I63
, /* 0 .. 63 */
5880 OP_I63s
, /* -64 .. 63 */
5881 OP_I64
, /* 1 .. 64 */
5882 OP_I64z
, /* 0 .. 64 */
5883 OP_I255
, /* 0 .. 255 */
5885 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
5886 OP_I7b
, /* 0 .. 7 */
5887 OP_I15b
, /* 0 .. 15 */
5888 OP_I31b
, /* 0 .. 31 */
5890 OP_SH
, /* shifter operand */
5891 OP_SHG
, /* shifter operand with possible group relocation */
5892 OP_ADDR
, /* Memory address expression (any mode) */
5893 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
5894 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
5895 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
5896 OP_EXP
, /* arbitrary expression */
5897 OP_EXPi
, /* same, with optional immediate prefix */
5898 OP_EXPr
, /* same, with optional relocation suffix */
5899 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
5901 OP_CPSF
, /* CPS flags */
5902 OP_ENDI
, /* Endianness specifier */
5903 OP_PSR
, /* CPSR/SPSR mask for msr */
5904 OP_COND
, /* conditional code */
5905 OP_TB
, /* Table branch. */
5907 OP_RVC_PSR
, /* CPSR/SPSR mask for msr, or VFP control register. */
5908 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
5910 OP_RRnpc_I0
, /* ARM register or literal 0 */
5911 OP_RR_EXr
, /* ARM register or expression with opt. reloc suff. */
5912 OP_RR_EXi
, /* ARM register or expression with imm prefix */
5913 OP_RF_IF
, /* FPA register or immediate */
5914 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
5915 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
5917 /* Optional operands. */
5918 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
5919 OP_oI31b
, /* 0 .. 31 */
5920 OP_oI32b
, /* 1 .. 32 */
5921 OP_oIffffb
, /* 0 .. 65535 */
5922 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
5924 OP_oRR
, /* ARM register */
5925 OP_oRRnpc
, /* ARM register, not the PC */
5926 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
5927 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
5928 OP_oRND
, /* Optional Neon double precision register */
5929 OP_oRNQ
, /* Optional Neon quad precision register */
5930 OP_oRNDQ
, /* Optional Neon double or quad precision register */
5931 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
5932 OP_oSHll
, /* LSL immediate */
5933 OP_oSHar
, /* ASR immediate */
5934 OP_oSHllar
, /* LSL or ASR immediate */
5935 OP_oROR
, /* ROR 0/8/16/24 */
5936 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
5938 /* Some pre-defined mixed (ARM/THUMB) operands. */
5939 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
5940 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
5941 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
5943 OP_FIRST_OPTIONAL
= OP_oI7b
5946 /* Generic instruction operand parser. This does no encoding and no
5947 semantic validation; it merely squirrels values away in the inst
5948 structure. Returns SUCCESS or FAIL depending on whether the
5949 specified grammar matched. */
5951 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
5953 unsigned const int *upat
= pattern
;
5954 char *backtrack_pos
= 0;
5955 const char *backtrack_error
= 0;
5956 int i
, val
, backtrack_index
= 0;
5957 enum arm_reg_type rtype
;
5958 parse_operand_result result
;
5959 unsigned int op_parse_code
;
5961 #define po_char_or_fail(chr) \
5964 if (skip_past_char (&str, chr) == FAIL) \
5969 #define po_reg_or_fail(regtype) \
5972 val = arm_typed_reg_parse (& str, regtype, & rtype, \
5973 & inst.operands[i].vectype); \
5976 first_error (_(reg_expected_msgs[regtype])); \
5979 inst.operands[i].reg = val; \
5980 inst.operands[i].isreg = 1; \
5981 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5982 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5983 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5984 || rtype == REG_TYPE_VFD \
5985 || rtype == REG_TYPE_NQ); \
5989 #define po_reg_or_goto(regtype, label) \
5992 val = arm_typed_reg_parse (& str, regtype, & rtype, \
5993 & inst.operands[i].vectype); \
5997 inst.operands[i].reg = val; \
5998 inst.operands[i].isreg = 1; \
5999 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6000 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6001 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6002 || rtype == REG_TYPE_VFD \
6003 || rtype == REG_TYPE_NQ); \
6007 #define po_imm_or_fail(min, max, popt) \
6010 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6012 inst.operands[i].imm = val; \
6016 #define po_scalar_or_goto(elsz, label) \
6019 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6022 inst.operands[i].reg = val; \
6023 inst.operands[i].isscalar = 1; \
6027 #define po_misc_or_fail(expr) \
6035 #define po_misc_or_fail_no_backtrack(expr) \
6039 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6040 backtrack_pos = 0; \
6041 if (result != PARSE_OPERAND_SUCCESS) \
6046 #define po_barrier_or_imm(str) \
6049 val = parse_barrier (&str); \
6052 if (ISALPHA (*str)) \
6059 if ((inst.instruction & 0xf0) == 0x60 \
6062 /* ISB can only take SY as an option. */ \
6063 inst.error = _("invalid barrier type"); \
6070 skip_whitespace (str
);
6072 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6074 op_parse_code
= upat
[i
];
6075 if (op_parse_code
>= 1<<16)
6076 op_parse_code
= thumb
? (op_parse_code
>> 16)
6077 : (op_parse_code
& ((1<<16)-1));
6079 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6081 /* Remember where we are in case we need to backtrack. */
6082 gas_assert (!backtrack_pos
);
6083 backtrack_pos
= str
;
6084 backtrack_error
= inst
.error
;
6085 backtrack_index
= i
;
6088 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6089 po_char_or_fail (',');
6091 switch (op_parse_code
)
6099 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6100 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6101 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6102 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6103 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6104 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6106 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6108 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6110 /* Also accept generic coprocessor regs for unknown registers. */
6112 po_reg_or_fail (REG_TYPE_CN
);
6114 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6115 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6116 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6117 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6118 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6119 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6120 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6121 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6122 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6123 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6125 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6127 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6128 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6130 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6132 /* Neon scalar. Using an element size of 8 means that some invalid
6133 scalars are accepted here, so deal with those in later code. */
6134 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6138 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6141 po_imm_or_fail (0, 0, TRUE
);
6146 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6151 po_scalar_or_goto (8, try_rr
);
6154 po_reg_or_fail (REG_TYPE_RN
);
6160 po_scalar_or_goto (8, try_nsdq
);
6163 po_reg_or_fail (REG_TYPE_NSDQ
);
6169 po_scalar_or_goto (8, try_ndq
);
6172 po_reg_or_fail (REG_TYPE_NDQ
);
6178 po_scalar_or_goto (8, try_vfd
);
6181 po_reg_or_fail (REG_TYPE_VFD
);
6186 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6187 not careful then bad things might happen. */
6188 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6193 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6196 /* There's a possibility of getting a 64-bit immediate here, so
6197 we need special handling. */
6198 if (parse_big_immediate (&str
, i
) == FAIL
)
6200 inst
.error
= _("immediate value is out of range");
6208 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6211 po_imm_or_fail (0, 63, TRUE
);
6216 po_char_or_fail ('[');
6217 po_reg_or_fail (REG_TYPE_RN
);
6218 po_char_or_fail (']');
6224 po_reg_or_fail (REG_TYPE_RN
);
6225 if (skip_past_char (&str
, '!') == SUCCESS
)
6226 inst
.operands
[i
].writeback
= 1;
6230 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6231 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6232 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6233 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6234 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6235 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6236 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6237 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6238 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6239 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6240 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6241 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6243 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6245 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6246 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6248 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6249 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6250 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6252 /* Immediate variants */
6254 po_char_or_fail ('{');
6255 po_imm_or_fail (0, 255, TRUE
);
6256 po_char_or_fail ('}');
6260 /* The expression parser chokes on a trailing !, so we have
6261 to find it first and zap it. */
6264 while (*s
&& *s
!= ',')
6269 inst
.operands
[i
].writeback
= 1;
6271 po_imm_or_fail (0, 31, TRUE
);
6279 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6284 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6289 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6291 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6293 val
= parse_reloc (&str
);
6296 inst
.error
= _("unrecognized relocation suffix");
6299 else if (val
!= BFD_RELOC_UNUSED
)
6301 inst
.operands
[i
].imm
= val
;
6302 inst
.operands
[i
].hasreloc
= 1;
6307 /* Operand for MOVW or MOVT. */
6309 po_misc_or_fail (parse_half (&str
));
6312 /* Register or expression. */
6313 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6314 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6316 /* Register or immediate. */
6317 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6318 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6320 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6322 if (!is_immediate_prefix (*str
))
6325 val
= parse_fpa_immediate (&str
);
6328 /* FPA immediates are encoded as registers 8-15.
6329 parse_fpa_immediate has already applied the offset. */
6330 inst
.operands
[i
].reg
= val
;
6331 inst
.operands
[i
].isreg
= 1;
6334 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6335 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6337 /* Two kinds of register. */
6340 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6342 || (rege
->type
!= REG_TYPE_MMXWR
6343 && rege
->type
!= REG_TYPE_MMXWC
6344 && rege
->type
!= REG_TYPE_MMXWCG
))
6346 inst
.error
= _("iWMMXt data or control register expected");
6349 inst
.operands
[i
].reg
= rege
->number
;
6350 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
6356 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6358 || (rege
->type
!= REG_TYPE_MMXWC
6359 && rege
->type
!= REG_TYPE_MMXWCG
))
6361 inst
.error
= _("iWMMXt control register expected");
6364 inst
.operands
[i
].reg
= rege
->number
;
6365 inst
.operands
[i
].isreg
= 1;
6370 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
6371 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
6372 case OP_oROR
: val
= parse_ror (&str
); break;
6373 case OP_PSR
: val
= parse_psr (&str
); break;
6374 case OP_COND
: val
= parse_cond (&str
); break;
6375 case OP_oBARRIER_I15
:
6376 po_barrier_or_imm (str
); break;
6378 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
6383 po_reg_or_goto (REG_TYPE_VFC
, try_psr
);
6384 inst
.operands
[i
].isvec
= 1; /* Mark VFP control reg as vector. */
6387 val
= parse_psr (&str
);
6391 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
6394 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
6396 if (strncasecmp (str
, "APSR_", 5) == 0)
6403 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
6404 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
6405 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
6406 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
6407 default: found
= 16;
6411 inst
.operands
[i
].isvec
= 1;
6412 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
6413 inst
.operands
[i
].reg
= REG_PC
;
6420 po_misc_or_fail (parse_tb (&str
));
6423 /* Register lists. */
6425 val
= parse_reg_list (&str
);
6428 inst
.operands
[1].writeback
= 1;
6434 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
6438 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
6442 /* Allow Q registers too. */
6443 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
6448 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
6450 inst
.operands
[i
].issingle
= 1;
6455 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
6460 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
6461 &inst
.operands
[i
].vectype
);
6464 /* Addressing modes */
6466 po_misc_or_fail (parse_address (&str
, i
));
6470 po_misc_or_fail_no_backtrack (
6471 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
6475 po_misc_or_fail_no_backtrack (
6476 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
6480 po_misc_or_fail_no_backtrack (
6481 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
6485 po_misc_or_fail (parse_shifter_operand (&str
, i
));
6489 po_misc_or_fail_no_backtrack (
6490 parse_shifter_operand_group_reloc (&str
, i
));
6494 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
6498 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
6502 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
6506 as_fatal (_("unhandled operand code %d"), op_parse_code
);
6509 /* Various value-based sanity checks and shared operations. We
6510 do not signal immediate failures for the register constraints;
6511 this allows a syntax error to take precedence. */
6512 switch (op_parse_code
)
6520 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
6521 inst
.error
= BAD_PC
;
6526 if (inst
.operands
[i
].isreg
)
6528 if (inst
.operands
[i
].reg
== REG_PC
)
6529 inst
.error
= BAD_PC
;
6530 else if (inst
.operands
[i
].reg
== REG_SP
)
6531 inst
.error
= BAD_SP
;
6536 if (inst
.operands
[i
].isreg
6537 && inst
.operands
[i
].reg
== REG_PC
6538 && (inst
.operands
[i
].writeback
|| thumb
))
6539 inst
.error
= BAD_PC
;
6548 case OP_oBARRIER_I15
:
6557 inst
.operands
[i
].imm
= val
;
6564 /* If we get here, this operand was successfully parsed. */
6565 inst
.operands
[i
].present
= 1;
6569 inst
.error
= BAD_ARGS
;
6574 /* The parse routine should already have set inst.error, but set a
6575 default here just in case. */
6577 inst
.error
= _("syntax error");
6581 /* Do not backtrack over a trailing optional argument that
6582 absorbed some text. We will only fail again, with the
6583 'garbage following instruction' error message, which is
6584 probably less helpful than the current one. */
6585 if (backtrack_index
== i
&& backtrack_pos
!= str
6586 && upat
[i
+1] == OP_stop
)
6589 inst
.error
= _("syntax error");
6593 /* Try again, skipping the optional argument at backtrack_pos. */
6594 str
= backtrack_pos
;
6595 inst
.error
= backtrack_error
;
6596 inst
.operands
[backtrack_index
].present
= 0;
6597 i
= backtrack_index
;
6601 /* Check that we have parsed all the arguments. */
6602 if (*str
!= '\0' && !inst
.error
)
6603 inst
.error
= _("garbage following instruction");
6605 return inst
.error
? FAIL
: SUCCESS
;
6608 #undef po_char_or_fail
6609 #undef po_reg_or_fail
6610 #undef po_reg_or_goto
6611 #undef po_imm_or_fail
6612 #undef po_scalar_or_fail
6613 #undef po_barrier_or_imm
6615 /* Shorthand macro for instruction encoding functions issuing errors. */
6616 #define constraint(expr, err) \
6627 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
6628 instructions are unpredictable if these registers are used. This
6629 is the BadReg predicate in ARM's Thumb-2 documentation. */
6630 #define reject_bad_reg(reg) \
6632 if (reg == REG_SP || reg == REG_PC) \
6634 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
6639 /* If REG is R13 (the stack pointer), warn that its use is
6641 #define warn_deprecated_sp(reg) \
6643 if (warn_on_deprecated && reg == REG_SP) \
6644 as_warn (_("use of r13 is deprecated")); \
6647 /* Functions for operand encoding. ARM, then Thumb. */
6649 #define rotate_left(v, n) (v << n | v >> (32 - n))
6651 /* If VAL can be encoded in the immediate field of an ARM instruction,
6652 return the encoded form. Otherwise, return FAIL. */
6655 encode_arm_immediate (unsigned int val
)
6659 for (i
= 0; i
< 32; i
+= 2)
6660 if ((a
= rotate_left (val
, i
)) <= 0xff)
6661 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
6666 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6667 return the encoded form. Otherwise, return FAIL. */
6669 encode_thumb32_immediate (unsigned int val
)
6676 for (i
= 1; i
<= 24; i
++)
6679 if ((val
& ~(0xff << i
)) == 0)
6680 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
6684 if (val
== ((a
<< 16) | a
))
6686 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
6690 if (val
== ((a
<< 16) | a
))
6691 return 0x200 | (a
>> 8);
6695 /* Encode a VFP SP or DP register number into inst.instruction. */
6698 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
6700 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
6703 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
6706 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
6709 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
6714 first_error (_("D register out of range for selected VFP version"));
6722 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
6726 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
6730 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
6734 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
6738 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
6742 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
6750 /* Encode a <shift> in an ARM-format instruction. The immediate,
6751 if any, is handled by md_apply_fix. */
6753 encode_arm_shift (int i
)
6755 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
6756 inst
.instruction
|= SHIFT_ROR
<< 5;
6759 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
6760 if (inst
.operands
[i
].immisreg
)
6762 inst
.instruction
|= SHIFT_BY_REG
;
6763 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
6766 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
6771 encode_arm_shifter_operand (int i
)
6773 if (inst
.operands
[i
].isreg
)
6775 inst
.instruction
|= inst
.operands
[i
].reg
;
6776 encode_arm_shift (i
);
6779 inst
.instruction
|= INST_IMMEDIATE
;
6782 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6784 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
6786 gas_assert (inst
.operands
[i
].isreg
);
6787 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
6789 if (inst
.operands
[i
].preind
)
6793 inst
.error
= _("instruction does not accept preindexed addressing");
6796 inst
.instruction
|= PRE_INDEX
;
6797 if (inst
.operands
[i
].writeback
)
6798 inst
.instruction
|= WRITE_BACK
;
6801 else if (inst
.operands
[i
].postind
)
6803 gas_assert (inst
.operands
[i
].writeback
);
6805 inst
.instruction
|= WRITE_BACK
;
6807 else /* unindexed - only for coprocessor */
6809 inst
.error
= _("instruction does not accept unindexed addressing");
6813 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
6814 && (((inst
.instruction
& 0x000f0000) >> 16)
6815 == ((inst
.instruction
& 0x0000f000) >> 12)))
6816 as_warn ((inst
.instruction
& LOAD_BIT
)
6817 ? _("destination register same as write-back base")
6818 : _("source register same as write-back base"));
6821 /* inst.operands[i] was set up by parse_address. Encode it into an
6822 ARM-format mode 2 load or store instruction. If is_t is true,
6823 reject forms that cannot be used with a T instruction (i.e. not
6826 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
6828 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
6830 encode_arm_addr_mode_common (i
, is_t
);
6832 if (inst
.operands
[i
].immisreg
)
6834 constraint ((inst
.operands
[i
].imm
== REG_PC
6835 || (is_pc
&& inst
.operands
[i
].writeback
)),
6837 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
6838 inst
.instruction
|= inst
.operands
[i
].imm
;
6839 if (!inst
.operands
[i
].negative
)
6840 inst
.instruction
|= INDEX_UP
;
6841 if (inst
.operands
[i
].shifted
)
6843 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
6844 inst
.instruction
|= SHIFT_ROR
<< 5;
6847 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
6848 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
6852 else /* immediate offset in inst.reloc */
6854 if (is_pc
&& !inst
.reloc
.pc_rel
)
6856 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
6858 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
6859 cannot use PC in addressing.
6860 PC cannot be used in writeback addressing, either. */
6861 constraint ((is_t
|| inst
.operands
[i
].writeback
),
6864 /* Use of PC in str is deprecated for ARMv7. */
6865 if (warn_on_deprecated
6867 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
6868 as_warn (_("use of PC in this instruction is deprecated"));
6871 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
6872 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
6876 /* inst.operands[i] was set up by parse_address. Encode it into an
6877 ARM-format mode 3 load or store instruction. Reject forms that
6878 cannot be used with such instructions. If is_t is true, reject
6879 forms that cannot be used with a T instruction (i.e. not
6882 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
6884 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
6886 inst
.error
= _("instruction does not accept scaled register index");
6890 encode_arm_addr_mode_common (i
, is_t
);
6892 if (inst
.operands
[i
].immisreg
)
6894 constraint ((inst
.operands
[i
].imm
== REG_PC
6895 || inst
.operands
[i
].reg
== REG_PC
),
6897 inst
.instruction
|= inst
.operands
[i
].imm
;
6898 if (!inst
.operands
[i
].negative
)
6899 inst
.instruction
|= INDEX_UP
;
6901 else /* immediate offset in inst.reloc */
6903 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
6904 && inst
.operands
[i
].writeback
),
6906 inst
.instruction
|= HWOFFSET_IMM
;
6907 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
6908 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
6912 /* inst.operands[i] was set up by parse_address. Encode it into an
6913 ARM-format instruction. Reject all forms which cannot be encoded
6914 into a coprocessor load/store instruction. If wb_ok is false,
6915 reject use of writeback; if unind_ok is false, reject use of
6916 unindexed addressing. If reloc_override is not 0, use it instead
6917 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6918 (in which case it is preserved). */
6921 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
6923 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
6925 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
6927 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
6929 gas_assert (!inst
.operands
[i
].writeback
);
6932 inst
.error
= _("instruction does not support unindexed addressing");
6935 inst
.instruction
|= inst
.operands
[i
].imm
;
6936 inst
.instruction
|= INDEX_UP
;
6940 if (inst
.operands
[i
].preind
)
6941 inst
.instruction
|= PRE_INDEX
;
6943 if (inst
.operands
[i
].writeback
)
6945 if (inst
.operands
[i
].reg
== REG_PC
)
6947 inst
.error
= _("pc may not be used with write-back");
6952 inst
.error
= _("instruction does not support writeback");
6955 inst
.instruction
|= WRITE_BACK
;
6959 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
6960 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
6961 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
6962 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
6965 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
6967 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
6973 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6974 Determine whether it can be performed with a move instruction; if
6975 it can, convert inst.instruction to that move instruction and
6976 return TRUE; if it can't, convert inst.instruction to a literal-pool
6977 load and return FALSE. If this is not a valid thing to do in the
6978 current context, set inst.error and return TRUE.
6980 inst.operands[i] describes the destination register. */
6983 move_or_literal_pool (int i
, bfd_boolean thumb_p
, bfd_boolean mode_3
)
6988 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
6992 if ((inst
.instruction
& tbit
) == 0)
6994 inst
.error
= _("invalid pseudo operation");
6997 if (inst
.reloc
.exp
.X_op
!= O_constant
&& inst
.reloc
.exp
.X_op
!= O_symbol
)
6999 inst
.error
= _("constant expression expected");
7002 if (inst
.reloc
.exp
.X_op
== O_constant
)
7006 if (!unified_syntax
&& (inst
.reloc
.exp
.X_add_number
& ~0xFF) == 0)
7008 /* This can be done with a mov(1) instruction. */
7009 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
7010 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
;
7016 int value
= encode_arm_immediate (inst
.reloc
.exp
.X_add_number
);
7019 /* This can be done with a mov instruction. */
7020 inst
.instruction
&= LITERAL_MASK
;
7021 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
7022 inst
.instruction
|= value
& 0xfff;
7026 value
= encode_arm_immediate (~inst
.reloc
.exp
.X_add_number
);
7029 /* This can be done with a mvn instruction. */
7030 inst
.instruction
&= LITERAL_MASK
;
7031 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
7032 inst
.instruction
|= value
& 0xfff;
7038 if (add_to_lit_pool () == FAIL
)
7040 inst
.error
= _("literal pool insertion failed");
7043 inst
.operands
[1].reg
= REG_PC
;
7044 inst
.operands
[1].isreg
= 1;
7045 inst
.operands
[1].preind
= 1;
7046 inst
.reloc
.pc_rel
= 1;
7047 inst
.reloc
.type
= (thumb_p
7048 ? BFD_RELOC_ARM_THUMB_OFFSET
7050 ? BFD_RELOC_ARM_HWLITERAL
7051 : BFD_RELOC_ARM_LITERAL
));
7055 /* Functions for instruction encoding, sorted by sub-architecture.
7056 First some generics; their names are taken from the conventional
7057 bit positions for register arguments in ARM format instructions. */
7067 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7073 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7074 inst
.instruction
|= inst
.operands
[1].reg
;
7080 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7081 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7087 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7088 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7094 unsigned Rn
= inst
.operands
[2].reg
;
7095 /* Enforce restrictions on SWP instruction. */
7096 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
7098 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
7099 _("Rn must not overlap other operands"));
7101 /* SWP{b} is deprecated for ARMv6* and ARMv7. */
7102 if (warn_on_deprecated
7103 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
7104 as_warn (_("swp{b} use is deprecated for this architecture"));
7107 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7108 inst
.instruction
|= inst
.operands
[1].reg
;
7109 inst
.instruction
|= Rn
<< 16;
7115 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7116 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7117 inst
.instruction
|= inst
.operands
[2].reg
;
7123 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
7124 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
7125 && inst
.reloc
.exp
.X_op
!= O_illegal
)
7126 || inst
.reloc
.exp
.X_add_number
!= 0),
7128 inst
.instruction
|= inst
.operands
[0].reg
;
7129 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7130 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7136 inst
.instruction
|= inst
.operands
[0].imm
;
7142 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7143 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
7146 /* ARM instructions, in alphabetical order by function name (except
7147 that wrapper functions appear immediately after the function they
7150 /* This is a pseudo-op of the form "adr rd, label" to be converted
7151 into a relative address of the form "add rd, pc, #label-.-8". */
7156 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
7158 /* Frag hacking will turn this into a sub instruction if the offset turns
7159 out to be negative. */
7160 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
7161 inst
.reloc
.pc_rel
= 1;
7162 inst
.reloc
.exp
.X_add_number
-= 8;
7165 /* This is a pseudo-op of the form "adrl rd, label" to be converted
7166 into a relative address of the form:
7167 add rd, pc, #low(label-.-8)"
7168 add rd, rd, #high(label-.-8)" */
7173 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
7175 /* Frag hacking will turn this into a sub instruction if the offset turns
7176 out to be negative. */
7177 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
7178 inst
.reloc
.pc_rel
= 1;
7179 inst
.size
= INSN_SIZE
* 2;
7180 inst
.reloc
.exp
.X_add_number
-= 8;
7186 if (!inst
.operands
[1].present
)
7187 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
7188 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7189 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7190 encode_arm_shifter_operand (2);
7196 if (inst
.operands
[0].present
)
7198 constraint ((inst
.instruction
& 0xf0) != 0x40
7199 && inst
.operands
[0].imm
> 0xf
7200 && inst
.operands
[0].imm
< 0x0,
7201 _("bad barrier type"));
7202 inst
.instruction
|= inst
.operands
[0].imm
;
7205 inst
.instruction
|= 0xf;
7211 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
7212 constraint (msb
> 32, _("bit-field extends past end of register"));
7213 /* The instruction encoding stores the LSB and MSB,
7214 not the LSB and width. */
7215 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7216 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
7217 inst
.instruction
|= (msb
- 1) << 16;
7225 /* #0 in second position is alternative syntax for bfc, which is
7226 the same instruction but with REG_PC in the Rm field. */
7227 if (!inst
.operands
[1].isreg
)
7228 inst
.operands
[1].reg
= REG_PC
;
7230 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
7231 constraint (msb
> 32, _("bit-field extends past end of register"));
7232 /* The instruction encoding stores the LSB and MSB,
7233 not the LSB and width. */
7234 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7235 inst
.instruction
|= inst
.operands
[1].reg
;
7236 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
7237 inst
.instruction
|= (msb
- 1) << 16;
7243 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
7244 _("bit-field extends past end of register"));
7245 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7246 inst
.instruction
|= inst
.operands
[1].reg
;
7247 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
7248 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
7251 /* ARM V5 breakpoint instruction (argument parse)
7252 BKPT <16 bit unsigned immediate>
7253 Instruction is not conditional.
7254 The bit pattern given in insns[] has the COND_ALWAYS condition,
7255 and it is an error if the caller tried to override that. */
7260 /* Top 12 of 16 bits to bits 19:8. */
7261 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
7263 /* Bottom 4 of 16 bits to bits 3:0. */
7264 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
7268 encode_branch (int default_reloc
)
7270 if (inst
.operands
[0].hasreloc
)
7272 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
,
7273 _("the only suffix valid here is '(plt)'"));
7274 inst
.reloc
.type
= BFD_RELOC_ARM_PLT32
;
7278 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
7280 inst
.reloc
.pc_rel
= 1;
7287 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
7288 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
7291 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
7298 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
7300 if (inst
.cond
== COND_ALWAYS
)
7301 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
7303 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
7307 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
7310 /* ARM V5 branch-link-exchange instruction (argument parse)
7311 BLX <target_addr> ie BLX(1)
7312 BLX{<condition>} <Rm> ie BLX(2)
7313 Unfortunately, there are two different opcodes for this mnemonic.
7314 So, the insns[].value is not used, and the code here zaps values
7315 into inst.instruction.
7316 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
7321 if (inst
.operands
[0].isreg
)
7323 /* Arg is a register; the opcode provided by insns[] is correct.
7324 It is not illegal to do "blx pc", just useless. */
7325 if (inst
.operands
[0].reg
== REG_PC
)
7326 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
7328 inst
.instruction
|= inst
.operands
[0].reg
;
7332 /* Arg is an address; this instruction cannot be executed
7333 conditionally, and the opcode must be adjusted.
7334 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
7335 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
7336 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
7337 inst
.instruction
= 0xfa000000;
7338 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
7345 bfd_boolean want_reloc
;
7347 if (inst
.operands
[0].reg
== REG_PC
)
7348 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
7350 inst
.instruction
|= inst
.operands
[0].reg
;
7351 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
7352 it is for ARMv4t or earlier. */
7353 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
7354 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
7358 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
7363 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
7367 /* ARM v5TEJ. Jump to Jazelle code. */
7372 if (inst
.operands
[0].reg
== REG_PC
)
7373 as_tsktsk (_("use of r15 in bxj is not really useful"));
7375 inst
.instruction
|= inst
.operands
[0].reg
;
7378 /* Co-processor data operation:
7379 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7380 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
7384 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7385 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
7386 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7387 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
7388 inst
.instruction
|= inst
.operands
[4].reg
;
7389 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
7395 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7396 encode_arm_shifter_operand (1);
7399 /* Transfer between coprocessor and ARM registers.
7400 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7405 No special properties. */
7412 Rd
= inst
.operands
[2].reg
;
7415 if (inst
.instruction
== 0xee000010
7416 || inst
.instruction
== 0xfe000010)
7418 reject_bad_reg (Rd
);
7421 constraint (Rd
== REG_SP
, BAD_SP
);
7426 if (inst
.instruction
== 0xe000010)
7427 constraint (Rd
== REG_PC
, BAD_PC
);
7431 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7432 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
7433 inst
.instruction
|= Rd
<< 12;
7434 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
7435 inst
.instruction
|= inst
.operands
[4].reg
;
7436 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
7439 /* Transfer between coprocessor register and pair of ARM registers.
7440 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7445 Two XScale instructions are special cases of these:
7447 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7448 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7450 Result unpredictable if Rd or Rn is R15. */
7457 Rd
= inst
.operands
[2].reg
;
7458 Rn
= inst
.operands
[3].reg
;
7462 reject_bad_reg (Rd
);
7463 reject_bad_reg (Rn
);
7467 constraint (Rd
== REG_PC
, BAD_PC
);
7468 constraint (Rn
== REG_PC
, BAD_PC
);
7471 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7472 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
7473 inst
.instruction
|= Rd
<< 12;
7474 inst
.instruction
|= Rn
<< 16;
7475 inst
.instruction
|= inst
.operands
[4].reg
;
7481 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
7482 if (inst
.operands
[1].present
)
7484 inst
.instruction
|= CPSI_MMOD
;
7485 inst
.instruction
|= inst
.operands
[1].imm
;
7492 inst
.instruction
|= inst
.operands
[0].imm
;
7498 unsigned Rd
, Rn
, Rm
;
7500 Rd
= inst
.operands
[0].reg
;
7501 Rn
= (inst
.operands
[1].present
7502 ? inst
.operands
[1].reg
: Rd
);
7503 Rm
= inst
.operands
[2].reg
;
7505 constraint ((Rd
== REG_PC
), BAD_PC
);
7506 constraint ((Rn
== REG_PC
), BAD_PC
);
7507 constraint ((Rm
== REG_PC
), BAD_PC
);
7509 inst
.instruction
|= Rd
<< 16;
7510 inst
.instruction
|= Rn
<< 0;
7511 inst
.instruction
|= Rm
<< 8;
7517 /* There is no IT instruction in ARM mode. We
7518 process it to do the validation as if in
7519 thumb mode, just in case the code gets
7520 assembled for thumb using the unified syntax. */
7525 set_it_insn_type (IT_INSN
);
7526 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
7527 now_it
.cc
= inst
.operands
[0].imm
;
7534 int base_reg
= inst
.operands
[0].reg
;
7535 int range
= inst
.operands
[1].imm
;
7537 inst
.instruction
|= base_reg
<< 16;
7538 inst
.instruction
|= range
;
7540 if (inst
.operands
[1].writeback
)
7541 inst
.instruction
|= LDM_TYPE_2_OR_3
;
7543 if (inst
.operands
[0].writeback
)
7545 inst
.instruction
|= WRITE_BACK
;
7546 /* Check for unpredictable uses of writeback. */
7547 if (inst
.instruction
& LOAD_BIT
)
7549 /* Not allowed in LDM type 2. */
7550 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
7551 && ((range
& (1 << REG_PC
)) == 0))
7552 as_warn (_("writeback of base register is UNPREDICTABLE"));
7553 /* Only allowed if base reg not in list for other types. */
7554 else if (range
& (1 << base_reg
))
7555 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
7559 /* Not allowed for type 2. */
7560 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
7561 as_warn (_("writeback of base register is UNPREDICTABLE"));
7562 /* Only allowed if base reg not in list, or first in list. */
7563 else if ((range
& (1 << base_reg
))
7564 && (range
& ((1 << base_reg
) - 1)))
7565 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
7570 /* ARMv5TE load-consecutive (argument parse)
7579 constraint (inst
.operands
[0].reg
% 2 != 0,
7580 _("first destination register must be even"));
7581 constraint (inst
.operands
[1].present
7582 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
7583 _("can only load two consecutive registers"));
7584 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
7585 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
7587 if (!inst
.operands
[1].present
)
7588 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
7590 if (inst
.instruction
& LOAD_BIT
)
7592 /* encode_arm_addr_mode_3 will diagnose overlap between the base
7593 register and the first register written; we have to diagnose
7594 overlap between the base and the second register written here. */
7596 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
7597 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
7598 as_warn (_("base register written back, and overlaps "
7599 "second destination register"));
7601 /* For an index-register load, the index register must not overlap the
7602 destination (even if not write-back). */
7603 else if (inst
.operands
[2].immisreg
7604 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
7605 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
7606 as_warn (_("index register overlaps destination register"));
7609 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7610 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
7616 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
7617 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
7618 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
7619 || inst
.operands
[1].negative
7620 /* This can arise if the programmer has written
7622 or if they have mistakenly used a register name as the last
7625 It is very difficult to distinguish between these two cases
7626 because "rX" might actually be a label. ie the register
7627 name has been occluded by a symbol of the same name. So we
7628 just generate a general 'bad addressing mode' type error
7629 message and leave it up to the programmer to discover the
7630 true cause and fix their mistake. */
7631 || (inst
.operands
[1].reg
== REG_PC
),
7634 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7635 || inst
.reloc
.exp
.X_add_number
!= 0,
7636 _("offset must be zero in ARM encoding"));
7638 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
7640 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7641 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7642 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7648 constraint (inst
.operands
[0].reg
% 2 != 0,
7649 _("even register required"));
7650 constraint (inst
.operands
[1].present
7651 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
7652 _("can only load two consecutive registers"));
7653 /* If op 1 were present and equal to PC, this function wouldn't
7654 have been called in the first place. */
7655 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
7657 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7658 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7664 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7665 if (!inst
.operands
[1].isreg
)
7666 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/FALSE
))
7668 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
7674 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7676 if (inst
.operands
[1].preind
)
7678 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7679 || inst
.reloc
.exp
.X_add_number
!= 0,
7680 _("this instruction requires a post-indexed address"));
7682 inst
.operands
[1].preind
= 0;
7683 inst
.operands
[1].postind
= 1;
7684 inst
.operands
[1].writeback
= 1;
7686 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7687 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
7690 /* Halfword and signed-byte load/store operations. */
7695 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
7696 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7697 if (!inst
.operands
[1].isreg
)
7698 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/TRUE
))
7700 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
7706 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7708 if (inst
.operands
[1].preind
)
7710 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7711 || inst
.reloc
.exp
.X_add_number
!= 0,
7712 _("this instruction requires a post-indexed address"));
7714 inst
.operands
[1].preind
= 0;
7715 inst
.operands
[1].postind
= 1;
7716 inst
.operands
[1].writeback
= 1;
7718 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7719 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
7722 /* Co-processor register load/store.
7723 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7727 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7728 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7729 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7735 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7736 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7737 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
7738 && !(inst
.instruction
& 0x00400000))
7739 as_tsktsk (_("Rd and Rm should be different in mla"));
7741 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7742 inst
.instruction
|= inst
.operands
[1].reg
;
7743 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7744 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7750 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7751 encode_arm_shifter_operand (1);
7754 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7761 top
= (inst
.instruction
& 0x00400000) != 0;
7762 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
7763 _(":lower16: not allowed this instruction"));
7764 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
7765 _(":upper16: not allowed instruction"));
7766 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7767 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7769 imm
= inst
.reloc
.exp
.X_add_number
;
7770 /* The value is in two pieces: 0:11, 16:19. */
7771 inst
.instruction
|= (imm
& 0x00000fff);
7772 inst
.instruction
|= (imm
& 0x0000f000) << 4;
7776 static void do_vfp_nsyn_opcode (const char *);
7779 do_vfp_nsyn_mrs (void)
7781 if (inst
.operands
[0].isvec
)
7783 if (inst
.operands
[1].reg
!= 1)
7784 first_error (_("operand 1 must be FPSCR"));
7785 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
7786 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
7787 do_vfp_nsyn_opcode ("fmstat");
7789 else if (inst
.operands
[1].isvec
)
7790 do_vfp_nsyn_opcode ("fmrx");
7798 do_vfp_nsyn_msr (void)
7800 if (inst
.operands
[0].isvec
)
7801 do_vfp_nsyn_opcode ("fmxr");
7811 unsigned Rt
= inst
.operands
[0].reg
;
7813 if (thumb_mode
&& inst
.operands
[0].reg
== REG_SP
)
7815 inst
.error
= BAD_SP
;
7819 /* APSR_ sets isvec. All other refs to PC are illegal. */
7820 if (!inst
.operands
[0].isvec
&& inst
.operands
[0].reg
== REG_PC
)
7822 inst
.error
= BAD_PC
;
7826 if (inst
.operands
[1].reg
!= 1)
7827 first_error (_("operand 1 must be FPSCR"));
7829 inst
.instruction
|= (Rt
<< 12);
7835 unsigned Rt
= inst
.operands
[1].reg
;
7838 reject_bad_reg (Rt
);
7839 else if (Rt
== REG_PC
)
7841 inst
.error
= BAD_PC
;
7845 if (inst
.operands
[0].reg
!= 1)
7846 first_error (_("operand 0 must be FPSCR"));
7848 inst
.instruction
|= (Rt
<< 12);
7854 if (do_vfp_nsyn_mrs () == SUCCESS
)
7857 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7858 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
7860 _("'CPSR' or 'SPSR' expected"));
7861 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
7862 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7863 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
7866 /* Two possible forms:
7867 "{C|S}PSR_<field>, Rm",
7868 "{C|S}PSR_f, #expression". */
7873 if (do_vfp_nsyn_msr () == SUCCESS
)
7876 inst
.instruction
|= inst
.operands
[0].imm
;
7877 if (inst
.operands
[1].isreg
)
7878 inst
.instruction
|= inst
.operands
[1].reg
;
7881 inst
.instruction
|= INST_IMMEDIATE
;
7882 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
7883 inst
.reloc
.pc_rel
= 0;
7890 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
7892 if (!inst
.operands
[2].present
)
7893 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
7894 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7895 inst
.instruction
|= inst
.operands
[1].reg
;
7896 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7898 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7899 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
7900 as_tsktsk (_("Rd and Rm should be different in mul"));
7903 /* Long Multiply Parser
7904 UMULL RdLo, RdHi, Rm, Rs
7905 SMULL RdLo, RdHi, Rm, Rs
7906 UMLAL RdLo, RdHi, Rm, Rs
7907 SMLAL RdLo, RdHi, Rm, Rs. */
7912 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7913 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7914 inst
.instruction
|= inst
.operands
[2].reg
;
7915 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
7917 /* rdhi and rdlo must be different. */
7918 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
7919 as_tsktsk (_("rdhi and rdlo must be different"));
7921 /* rdhi, rdlo and rm must all be different before armv6. */
7922 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
7923 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
7924 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
7925 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7931 if (inst
.operands
[0].present
7932 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
7934 /* Architectural NOP hints are CPSR sets with no bits selected. */
7935 inst
.instruction
&= 0xf0000000;
7936 inst
.instruction
|= 0x0320f000;
7937 if (inst
.operands
[0].present
)
7938 inst
.instruction
|= inst
.operands
[0].imm
;
7942 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7943 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7944 Condition defaults to COND_ALWAYS.
7945 Error if Rd, Rn or Rm are R15. */
7950 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7951 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7952 inst
.instruction
|= inst
.operands
[2].reg
;
7953 if (inst
.operands
[3].present
)
7954 encode_arm_shift (3);
7957 /* ARM V6 PKHTB (Argument Parse). */
7962 if (!inst
.operands
[3].present
)
7964 /* If the shift specifier is omitted, turn the instruction
7965 into pkhbt rd, rm, rn. */
7966 inst
.instruction
&= 0xfff00010;
7967 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7968 inst
.instruction
|= inst
.operands
[1].reg
;
7969 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7973 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7974 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7975 inst
.instruction
|= inst
.operands
[2].reg
;
7976 encode_arm_shift (3);
7980 /* ARMv5TE: Preload-Cache
7981 MP Extensions: Preload for write
7985 Syntactically, like LDR with B=1, W=0, L=1. */
7990 constraint (!inst
.operands
[0].isreg
,
7991 _("'[' expected after PLD mnemonic"));
7992 constraint (inst
.operands
[0].postind
,
7993 _("post-indexed expression used in preload instruction"));
7994 constraint (inst
.operands
[0].writeback
,
7995 _("writeback used in preload instruction"));
7996 constraint (!inst
.operands
[0].preind
,
7997 _("unindexed addressing used in preload instruction"));
7998 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
8001 /* ARMv7: PLI <addr_mode> */
8005 constraint (!inst
.operands
[0].isreg
,
8006 _("'[' expected after PLI mnemonic"));
8007 constraint (inst
.operands
[0].postind
,
8008 _("post-indexed expression used in preload instruction"));
8009 constraint (inst
.operands
[0].writeback
,
8010 _("writeback used in preload instruction"));
8011 constraint (!inst
.operands
[0].preind
,
8012 _("unindexed addressing used in preload instruction"));
8013 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
8014 inst
.instruction
&= ~PRE_INDEX
;
8020 inst
.operands
[1] = inst
.operands
[0];
8021 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
8022 inst
.operands
[0].isreg
= 1;
8023 inst
.operands
[0].writeback
= 1;
8024 inst
.operands
[0].reg
= REG_SP
;
8028 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8029 word at the specified address and the following word
8031 Unconditionally executed.
8032 Error if Rn is R15. */
8037 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8038 if (inst
.operands
[0].writeback
)
8039 inst
.instruction
|= WRITE_BACK
;
8042 /* ARM V6 ssat (argument parse). */
8047 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8048 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
8049 inst
.instruction
|= inst
.operands
[2].reg
;
8051 if (inst
.operands
[3].present
)
8052 encode_arm_shift (3);
8055 /* ARM V6 usat (argument parse). */
8060 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8061 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
8062 inst
.instruction
|= inst
.operands
[2].reg
;
8064 if (inst
.operands
[3].present
)
8065 encode_arm_shift (3);
8068 /* ARM V6 ssat16 (argument parse). */
8073 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8074 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
8075 inst
.instruction
|= inst
.operands
[2].reg
;
8081 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8082 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
8083 inst
.instruction
|= inst
.operands
[2].reg
;
8086 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
8087 preserving the other bits.
8089 setend <endian_specifier>, where <endian_specifier> is either
8095 if (inst
.operands
[0].imm
)
8096 inst
.instruction
|= 0x200;
8102 unsigned int Rm
= (inst
.operands
[1].present
8103 ? inst
.operands
[1].reg
8104 : inst
.operands
[0].reg
);
8106 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8107 inst
.instruction
|= Rm
;
8108 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
8110 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8111 inst
.instruction
|= SHIFT_BY_REG
;
8114 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
8120 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
8121 inst
.reloc
.pc_rel
= 0;
8127 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
8128 inst
.reloc
.pc_rel
= 0;
8131 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
8132 SMLAxy{cond} Rd,Rm,Rs,Rn
8133 SMLAWy{cond} Rd,Rm,Rs,Rn
8134 Error if any register is R15. */
8139 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8140 inst
.instruction
|= inst
.operands
[1].reg
;
8141 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8142 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8145 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8146 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8147 Error if any register is R15.
8148 Warning if Rdlo == Rdhi. */
8153 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8154 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8155 inst
.instruction
|= inst
.operands
[2].reg
;
8156 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
8158 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
8159 as_tsktsk (_("rdhi and rdlo must be different"));
8162 /* ARM V5E (El Segundo) signed-multiply (argument parse)
8163 SMULxy{cond} Rd,Rm,Rs
8164 Error if any register is R15. */
8169 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8170 inst
.instruction
|= inst
.operands
[1].reg
;
8171 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8174 /* ARM V6 srs (argument parse). The variable fields in the encoding are
8175 the same for both ARM and Thumb-2. */
8182 if (inst
.operands
[0].present
)
8184 reg
= inst
.operands
[0].reg
;
8185 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
8190 inst
.instruction
|= reg
<< 16;
8191 inst
.instruction
|= inst
.operands
[1].imm
;
8192 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
8193 inst
.instruction
|= WRITE_BACK
;
8196 /* ARM V6 strex (argument parse). */
8201 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
8202 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
8203 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
8204 || inst
.operands
[2].negative
8205 /* See comment in do_ldrex(). */
8206 || (inst
.operands
[2].reg
== REG_PC
),
8209 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
8210 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
8212 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8213 || inst
.reloc
.exp
.X_add_number
!= 0,
8214 _("offset must be zero in ARM encoding"));
8216 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8217 inst
.instruction
|= inst
.operands
[1].reg
;
8218 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8219 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8225 constraint (inst
.operands
[1].reg
% 2 != 0,
8226 _("even register required"));
8227 constraint (inst
.operands
[2].present
8228 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
8229 _("can only store two consecutive registers"));
8230 /* If op 2 were present and equal to PC, this function wouldn't
8231 have been called in the first place. */
8232 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
8234 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
8235 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
8236 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
8239 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8240 inst
.instruction
|= inst
.operands
[1].reg
;
8241 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8244 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8245 extends it to 32-bits, and adds the result to a value in another
8246 register. You can specify a rotation by 0, 8, 16, or 24 bits
8247 before extracting the 16-bit value.
8248 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8249 Condition defaults to COND_ALWAYS.
8250 Error if any register uses R15. */
8255 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8256 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8257 inst
.instruction
|= inst
.operands
[2].reg
;
8258 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
8263 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8264 Condition defaults to COND_ALWAYS.
8265 Error if any register uses R15. */
8270 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8271 inst
.instruction
|= inst
.operands
[1].reg
;
8272 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
8275 /* VFP instructions. In a logical order: SP variant first, monad
8276 before dyad, arithmetic then move then load/store. */
8279 do_vfp_sp_monadic (void)
8281 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8282 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
8286 do_vfp_sp_dyadic (void)
8288 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8289 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
8290 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
8294 do_vfp_sp_compare_z (void)
8296 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8300 do_vfp_dp_sp_cvt (void)
8302 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8303 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
8307 do_vfp_sp_dp_cvt (void)
8309 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8310 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
8314 do_vfp_reg_from_sp (void)
8316 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8317 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
8321 do_vfp_reg2_from_sp2 (void)
8323 constraint (inst
.operands
[2].imm
!= 2,
8324 _("only two consecutive VFP SP registers allowed here"));
8325 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8326 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8327 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
8331 do_vfp_sp_from_reg (void)
8333 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
8334 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8338 do_vfp_sp2_from_reg2 (void)
8340 constraint (inst
.operands
[0].imm
!= 2,
8341 _("only two consecutive VFP SP registers allowed here"));
8342 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
8343 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8344 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8348 do_vfp_sp_ldst (void)
8350 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8351 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
8355 do_vfp_dp_ldst (void)
8357 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8358 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
8363 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
8365 if (inst
.operands
[0].writeback
)
8366 inst
.instruction
|= WRITE_BACK
;
8368 constraint (ldstm_type
!= VFP_LDSTMIA
,
8369 _("this addressing mode requires base-register writeback"));
8370 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8371 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
8372 inst
.instruction
|= inst
.operands
[1].imm
;
8376 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
8380 if (inst
.operands
[0].writeback
)
8381 inst
.instruction
|= WRITE_BACK
;
8383 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
8384 _("this addressing mode requires base-register writeback"));
8386 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8387 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8389 count
= inst
.operands
[1].imm
<< 1;
8390 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
8393 inst
.instruction
|= count
;
8397 do_vfp_sp_ldstmia (void)
8399 vfp_sp_ldstm (VFP_LDSTMIA
);
8403 do_vfp_sp_ldstmdb (void)
8405 vfp_sp_ldstm (VFP_LDSTMDB
);
8409 do_vfp_dp_ldstmia (void)
8411 vfp_dp_ldstm (VFP_LDSTMIA
);
8415 do_vfp_dp_ldstmdb (void)
8417 vfp_dp_ldstm (VFP_LDSTMDB
);
8421 do_vfp_xp_ldstmia (void)
8423 vfp_dp_ldstm (VFP_LDSTMIAX
);
8427 do_vfp_xp_ldstmdb (void)
8429 vfp_dp_ldstm (VFP_LDSTMDBX
);
8433 do_vfp_dp_rd_rm (void)
8435 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8436 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
8440 do_vfp_dp_rn_rd (void)
8442 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
8443 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8447 do_vfp_dp_rd_rn (void)
8449 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8450 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
8454 do_vfp_dp_rd_rn_rm (void)
8456 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8457 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
8458 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
8464 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8468 do_vfp_dp_rm_rd_rn (void)
8470 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
8471 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8472 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
8475 /* VFPv3 instructions. */
8477 do_vfp_sp_const (void)
8479 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8480 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
8481 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
8485 do_vfp_dp_const (void)
8487 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8488 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
8489 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
8493 vfp_conv (int srcsize
)
8495 unsigned immbits
= srcsize
- inst
.operands
[1].imm
;
8496 inst
.instruction
|= (immbits
& 1) << 5;
8497 inst
.instruction
|= (immbits
>> 1);
8501 do_vfp_sp_conv_16 (void)
8503 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8508 do_vfp_dp_conv_16 (void)
8510 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8515 do_vfp_sp_conv_32 (void)
8517 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8522 do_vfp_dp_conv_32 (void)
8524 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8528 /* FPA instructions. Also in a logical order. */
8533 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8534 inst
.instruction
|= inst
.operands
[1].reg
;
8538 do_fpa_ldmstm (void)
8540 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8541 switch (inst
.operands
[1].imm
)
8543 case 1: inst
.instruction
|= CP_T_X
; break;
8544 case 2: inst
.instruction
|= CP_T_Y
; break;
8545 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
8550 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
8552 /* The instruction specified "ea" or "fd", so we can only accept
8553 [Rn]{!}. The instruction does not really support stacking or
8554 unstacking, so we have to emulate these by setting appropriate
8555 bits and offsets. */
8556 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8557 || inst
.reloc
.exp
.X_add_number
!= 0,
8558 _("this instruction does not support indexing"));
8560 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
8561 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
8563 if (!(inst
.instruction
& INDEX_UP
))
8564 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
8566 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
8568 inst
.operands
[2].preind
= 0;
8569 inst
.operands
[2].postind
= 1;
8573 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
8576 /* iWMMXt instructions: strictly in alphabetical order. */
8579 do_iwmmxt_tandorc (void)
8581 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
8585 do_iwmmxt_textrc (void)
8587 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8588 inst
.instruction
|= inst
.operands
[1].imm
;
8592 do_iwmmxt_textrm (void)
8594 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8595 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8596 inst
.instruction
|= inst
.operands
[2].imm
;
8600 do_iwmmxt_tinsr (void)
8602 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8603 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8604 inst
.instruction
|= inst
.operands
[2].imm
;
8608 do_iwmmxt_tmia (void)
8610 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
8611 inst
.instruction
|= inst
.operands
[1].reg
;
8612 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8616 do_iwmmxt_waligni (void)
8618 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8619 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8620 inst
.instruction
|= inst
.operands
[2].reg
;
8621 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
8625 do_iwmmxt_wmerge (void)
8627 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8628 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8629 inst
.instruction
|= inst
.operands
[2].reg
;
8630 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
8634 do_iwmmxt_wmov (void)
8636 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
8637 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8638 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8639 inst
.instruction
|= inst
.operands
[1].reg
;
8643 do_iwmmxt_wldstbh (void)
8646 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8648 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
8650 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
8651 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
8655 do_iwmmxt_wldstw (void)
8657 /* RIWR_RIWC clears .isreg for a control register. */
8658 if (!inst
.operands
[0].isreg
)
8660 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8661 inst
.instruction
|= 0xf0000000;
8664 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8665 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8669 do_iwmmxt_wldstd (void)
8671 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8672 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
8673 && inst
.operands
[1].immisreg
)
8675 inst
.instruction
&= ~0x1a000ff;
8676 inst
.instruction
|= (0xf << 28);
8677 if (inst
.operands
[1].preind
)
8678 inst
.instruction
|= PRE_INDEX
;
8679 if (!inst
.operands
[1].negative
)
8680 inst
.instruction
|= INDEX_UP
;
8681 if (inst
.operands
[1].writeback
)
8682 inst
.instruction
|= WRITE_BACK
;
8683 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8684 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
8685 inst
.instruction
|= inst
.operands
[1].imm
;
8688 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
8692 do_iwmmxt_wshufh (void)
8694 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8695 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8696 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
8697 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
8701 do_iwmmxt_wzero (void)
8703 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8704 inst
.instruction
|= inst
.operands
[0].reg
;
8705 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8706 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8710 do_iwmmxt_wrwrwr_or_imm5 (void)
8712 if (inst
.operands
[2].isreg
)
8715 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
8716 _("immediate operand requires iWMMXt2"));
8718 if (inst
.operands
[2].imm
== 0)
8720 switch ((inst
.instruction
>> 20) & 0xf)
8726 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8727 inst
.operands
[2].imm
= 16;
8728 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
8734 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8735 inst
.operands
[2].imm
= 32;
8736 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
8743 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8745 wrn
= (inst
.instruction
>> 16) & 0xf;
8746 inst
.instruction
&= 0xff0fff0f;
8747 inst
.instruction
|= wrn
;
8748 /* Bail out here; the instruction is now assembled. */
8753 /* Map 32 -> 0, etc. */
8754 inst
.operands
[2].imm
&= 0x1f;
8755 inst
.instruction
|= (0xf << 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
8759 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8760 operations first, then control, shift, and load/store. */
8762 /* Insns like "foo X,Y,Z". */
8765 do_mav_triple (void)
8767 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8768 inst
.instruction
|= inst
.operands
[1].reg
;
8769 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8772 /* Insns like "foo W,X,Y,Z".
8773 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8778 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
8779 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8780 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8781 inst
.instruction
|= inst
.operands
[3].reg
;
8784 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8788 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8791 /* Maverick shift immediate instructions.
8792 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8793 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8798 int imm
= inst
.operands
[2].imm
;
8800 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8801 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8803 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8804 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8805 Bit 4 should be 0. */
8806 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
8808 inst
.instruction
|= imm
;
8811 /* XScale instructions. Also sorted arithmetic before move. */
8813 /* Xscale multiply-accumulate (argument parse)
8816 MIAxycc acc0,Rm,Rs. */
8821 inst
.instruction
|= inst
.operands
[1].reg
;
8822 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8825 /* Xscale move-accumulator-register (argument parse)
8827 MARcc acc0,RdLo,RdHi. */
8832 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8833 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8836 /* Xscale move-register-accumulator (argument parse)
8838 MRAcc RdLo,RdHi,acc0. */
8843 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
8844 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8845 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8848 /* Encoding functions relevant only to Thumb. */
8850 /* inst.operands[i] is a shifted-register operand; encode
8851 it into inst.instruction in the format used by Thumb32. */
8854 encode_thumb32_shifted_operand (int i
)
8856 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
8857 unsigned int shift
= inst
.operands
[i
].shift_kind
;
8859 constraint (inst
.operands
[i
].immisreg
,
8860 _("shift by register not allowed in thumb mode"));
8861 inst
.instruction
|= inst
.operands
[i
].reg
;
8862 if (shift
== SHIFT_RRX
)
8863 inst
.instruction
|= SHIFT_ROR
<< 4;
8866 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8867 _("expression too complex"));
8869 constraint (value
> 32
8870 || (value
== 32 && (shift
== SHIFT_LSL
8871 || shift
== SHIFT_ROR
)),
8872 _("shift expression is too large"));
8876 else if (value
== 32)
8879 inst
.instruction
|= shift
<< 4;
8880 inst
.instruction
|= (value
& 0x1c) << 10;
8881 inst
.instruction
|= (value
& 0x03) << 6;
8886 /* inst.operands[i] was set up by parse_address. Encode it into a
8887 Thumb32 format load or store instruction. Reject forms that cannot
8888 be used with such instructions. If is_t is true, reject forms that
8889 cannot be used with a T instruction; if is_d is true, reject forms
8890 that cannot be used with a D instruction. If it is a store insn,
8894 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
8896 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
8898 constraint (!inst
.operands
[i
].isreg
,
8899 _("Instruction does not support =N addresses"));
8901 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8902 if (inst
.operands
[i
].immisreg
)
8904 constraint (is_pc
, BAD_PC_ADDRESSING
);
8905 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
8906 constraint (inst
.operands
[i
].negative
,
8907 _("Thumb does not support negative register indexing"));
8908 constraint (inst
.operands
[i
].postind
,
8909 _("Thumb does not support register post-indexing"));
8910 constraint (inst
.operands
[i
].writeback
,
8911 _("Thumb does not support register indexing with writeback"));
8912 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
8913 _("Thumb supports only LSL in shifted register indexing"));
8915 inst
.instruction
|= inst
.operands
[i
].imm
;
8916 if (inst
.operands
[i
].shifted
)
8918 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8919 _("expression too complex"));
8920 constraint (inst
.reloc
.exp
.X_add_number
< 0
8921 || inst
.reloc
.exp
.X_add_number
> 3,
8922 _("shift out of range"));
8923 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
8925 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8927 else if (inst
.operands
[i
].preind
)
8929 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
8930 constraint (is_t
&& inst
.operands
[i
].writeback
,
8931 _("cannot use writeback with this instruction"));
8932 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0)
8933 && !inst
.reloc
.pc_rel
, BAD_PC_ADDRESSING
);
8937 inst
.instruction
|= 0x01000000;
8938 if (inst
.operands
[i
].writeback
)
8939 inst
.instruction
|= 0x00200000;
8943 inst
.instruction
|= 0x00000c00;
8944 if (inst
.operands
[i
].writeback
)
8945 inst
.instruction
|= 0x00000100;
8947 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8949 else if (inst
.operands
[i
].postind
)
8951 gas_assert (inst
.operands
[i
].writeback
);
8952 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
8953 constraint (is_t
, _("cannot use post-indexing with this instruction"));
8956 inst
.instruction
|= 0x00200000;
8958 inst
.instruction
|= 0x00000900;
8959 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8961 else /* unindexed - only for coprocessor */
8962 inst
.error
= _("instruction does not accept unindexed addressing");
8965 /* Table of Thumb instructions which exist in both 16- and 32-bit
8966 encodings (the latter only in post-V6T2 cores). The index is the
8967 value used in the insns table below. When there is more than one
8968 possible 16-bit encoding for the instruction, this table always
8970 Also contains several pseudo-instructions used during relaxation. */
8971 #define T16_32_TAB \
8972 X(_adc, 4140, eb400000), \
8973 X(_adcs, 4140, eb500000), \
8974 X(_add, 1c00, eb000000), \
8975 X(_adds, 1c00, eb100000), \
8976 X(_addi, 0000, f1000000), \
8977 X(_addis, 0000, f1100000), \
8978 X(_add_pc,000f, f20f0000), \
8979 X(_add_sp,000d, f10d0000), \
8980 X(_adr, 000f, f20f0000), \
8981 X(_and, 4000, ea000000), \
8982 X(_ands, 4000, ea100000), \
8983 X(_asr, 1000, fa40f000), \
8984 X(_asrs, 1000, fa50f000), \
8985 X(_b, e000, f000b000), \
8986 X(_bcond, d000, f0008000), \
8987 X(_bic, 4380, ea200000), \
8988 X(_bics, 4380, ea300000), \
8989 X(_cmn, 42c0, eb100f00), \
8990 X(_cmp, 2800, ebb00f00), \
8991 X(_cpsie, b660, f3af8400), \
8992 X(_cpsid, b670, f3af8600), \
8993 X(_cpy, 4600, ea4f0000), \
8994 X(_dec_sp,80dd, f1ad0d00), \
8995 X(_eor, 4040, ea800000), \
8996 X(_eors, 4040, ea900000), \
8997 X(_inc_sp,00dd, f10d0d00), \
8998 X(_ldmia, c800, e8900000), \
8999 X(_ldr, 6800, f8500000), \
9000 X(_ldrb, 7800, f8100000), \
9001 X(_ldrh, 8800, f8300000), \
9002 X(_ldrsb, 5600, f9100000), \
9003 X(_ldrsh, 5e00, f9300000), \
9004 X(_ldr_pc,4800, f85f0000), \
9005 X(_ldr_pc2,4800, f85f0000), \
9006 X(_ldr_sp,9800, f85d0000), \
9007 X(_lsl, 0000, fa00f000), \
9008 X(_lsls, 0000, fa10f000), \
9009 X(_lsr, 0800, fa20f000), \
9010 X(_lsrs, 0800, fa30f000), \
9011 X(_mov, 2000, ea4f0000), \
9012 X(_movs, 2000, ea5f0000), \
9013 X(_mul, 4340, fb00f000), \
9014 X(_muls, 4340, ffffffff), /* no 32b muls */ \
9015 X(_mvn, 43c0, ea6f0000), \
9016 X(_mvns, 43c0, ea7f0000), \
9017 X(_neg, 4240, f1c00000), /* rsb #0 */ \
9018 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
9019 X(_orr, 4300, ea400000), \
9020 X(_orrs, 4300, ea500000), \
9021 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
9022 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
9023 X(_rev, ba00, fa90f080), \
9024 X(_rev16, ba40, fa90f090), \
9025 X(_revsh, bac0, fa90f0b0), \
9026 X(_ror, 41c0, fa60f000), \
9027 X(_rors, 41c0, fa70f000), \
9028 X(_sbc, 4180, eb600000), \
9029 X(_sbcs, 4180, eb700000), \
9030 X(_stmia, c000, e8800000), \
9031 X(_str, 6000, f8400000), \
9032 X(_strb, 7000, f8000000), \
9033 X(_strh, 8000, f8200000), \
9034 X(_str_sp,9000, f84d0000), \
9035 X(_sub, 1e00, eba00000), \
9036 X(_subs, 1e00, ebb00000), \
9037 X(_subi, 8000, f1a00000), \
9038 X(_subis, 8000, f1b00000), \
9039 X(_sxtb, b240, fa4ff080), \
9040 X(_sxth, b200, fa0ff080), \
9041 X(_tst, 4200, ea100f00), \
9042 X(_uxtb, b2c0, fa5ff080), \
9043 X(_uxth, b280, fa1ff080), \
9044 X(_nop, bf00, f3af8000), \
9045 X(_yield, bf10, f3af8001), \
9046 X(_wfe, bf20, f3af8002), \
9047 X(_wfi, bf30, f3af8003), \
9048 X(_sev, bf40, f3af8004),
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
9069 /* Thumb instruction encoders, in alphabetical order. */
9074 do_t_add_sub_w (void)
9078 Rd
= inst
.operands
[0].reg
;
9079 Rn
= inst
.operands
[1].reg
;
9081 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
9082 is the SP-{plus,minus}-immediate form of the instruction. */
9084 constraint (Rd
== REG_PC
, BAD_PC
);
9086 reject_bad_reg (Rd
);
9088 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
9089 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
9092 /* Parse an add or subtract instruction. We get here with inst.instruction
9093 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
9100 Rd
= inst
.operands
[0].reg
;
9101 Rs
= (inst
.operands
[1].present
9102 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9103 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9106 set_it_insn_type_last ();
9114 flags
= (inst
.instruction
== T_MNEM_adds
9115 || inst
.instruction
== T_MNEM_subs
);
9117 narrow
= !in_it_block ();
9119 narrow
= in_it_block ();
9120 if (!inst
.operands
[2].isreg
)
9124 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
9126 add
= (inst
.instruction
== T_MNEM_add
9127 || inst
.instruction
== T_MNEM_adds
);
9129 if (inst
.size_req
!= 4)
9131 /* Attempt to use a narrow opcode, with relaxation if
9133 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
9134 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
9135 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
9136 opcode
= T_MNEM_add_sp
;
9137 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
9138 opcode
= T_MNEM_add_pc
;
9139 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
9142 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
9144 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
9148 inst
.instruction
= THUMB_OP16(opcode
);
9149 inst
.instruction
|= (Rd
<< 4) | Rs
;
9150 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9151 if (inst
.size_req
!= 2)
9152 inst
.relax
= opcode
;
9155 constraint (inst
.size_req
== 2, BAD_HIREG
);
9157 if (inst
.size_req
== 4
9158 || (inst
.size_req
!= 2 && !opcode
))
9162 constraint (add
, BAD_PC
);
9163 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
9164 _("only SUBS PC, LR, #const allowed"));
9165 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9166 _("expression too complex"));
9167 constraint (inst
.reloc
.exp
.X_add_number
< 0
9168 || inst
.reloc
.exp
.X_add_number
> 0xff,
9169 _("immediate value out of range"));
9170 inst
.instruction
= T2_SUBS_PC_LR
9171 | inst
.reloc
.exp
.X_add_number
;
9172 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9175 else if (Rs
== REG_PC
)
9177 /* Always use addw/subw. */
9178 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
9179 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
9183 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9184 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
9187 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9189 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
9191 inst
.instruction
|= Rd
<< 8;
9192 inst
.instruction
|= Rs
<< 16;
9197 Rn
= inst
.operands
[2].reg
;
9198 /* See if we can do this with a 16-bit instruction. */
9199 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
9201 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
9206 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
9207 || inst
.instruction
== T_MNEM_add
)
9210 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
9214 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
9216 /* Thumb-1 cores (except v6-M) require at least one high
9217 register in a narrow non flag setting add. */
9218 if (Rd
> 7 || Rn
> 7
9219 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
9220 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
9227 inst
.instruction
= T_OPCODE_ADD_HI
;
9228 inst
.instruction
|= (Rd
& 8) << 4;
9229 inst
.instruction
|= (Rd
& 7);
9230 inst
.instruction
|= Rn
<< 3;
9236 constraint (Rd
== REG_PC
, BAD_PC
);
9237 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
9238 constraint (Rs
== REG_PC
, BAD_PC
);
9239 reject_bad_reg (Rn
);
9241 /* If we get here, it can't be done in 16 bits. */
9242 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
9243 _("shift must be constant"));
9244 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9245 inst
.instruction
|= Rd
<< 8;
9246 inst
.instruction
|= Rs
<< 16;
9247 encode_thumb32_shifted_operand (2);
9252 constraint (inst
.instruction
== T_MNEM_adds
9253 || inst
.instruction
== T_MNEM_subs
,
9256 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
9258 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
9259 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
9262 inst
.instruction
= (inst
.instruction
== T_MNEM_add
9264 inst
.instruction
|= (Rd
<< 4) | Rs
;
9265 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9269 Rn
= inst
.operands
[2].reg
;
9270 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
9272 /* We now have Rd, Rs, and Rn set to registers. */
9273 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
9275 /* Can't do this for SUB. */
9276 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
9277 inst
.instruction
= T_OPCODE_ADD_HI
;
9278 inst
.instruction
|= (Rd
& 8) << 4;
9279 inst
.instruction
|= (Rd
& 7);
9281 inst
.instruction
|= Rn
<< 3;
9283 inst
.instruction
|= Rs
<< 3;
9285 constraint (1, _("dest must overlap one source register"));
9289 inst
.instruction
= (inst
.instruction
== T_MNEM_add
9290 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
9291 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
9301 Rd
= inst
.operands
[0].reg
;
9302 reject_bad_reg (Rd
);
9304 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
9306 /* Defer to section relaxation. */
9307 inst
.relax
= inst
.instruction
;
9308 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9309 inst
.instruction
|= Rd
<< 4;
9311 else if (unified_syntax
&& inst
.size_req
!= 2)
9313 /* Generate a 32-bit opcode. */
9314 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9315 inst
.instruction
|= Rd
<< 8;
9316 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
9317 inst
.reloc
.pc_rel
= 1;
9321 /* Generate a 16-bit opcode. */
9322 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9323 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9324 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
9325 inst
.reloc
.pc_rel
= 1;
9327 inst
.instruction
|= Rd
<< 4;
9331 /* Arithmetic instructions for which there is just one 16-bit
9332 instruction encoding, and it allows only two low registers.
9333 For maximal compatibility with ARM syntax, we allow three register
9334 operands even when Thumb-32 instructions are not available, as long
9335 as the first two are identical. For instance, both "sbc r0,r1" and
9336 "sbc r0,r0,r1" are allowed. */
9342 Rd
= inst
.operands
[0].reg
;
9343 Rs
= (inst
.operands
[1].present
9344 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9345 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9346 Rn
= inst
.operands
[2].reg
;
9348 reject_bad_reg (Rd
);
9349 reject_bad_reg (Rs
);
9350 if (inst
.operands
[2].isreg
)
9351 reject_bad_reg (Rn
);
9355 if (!inst
.operands
[2].isreg
)
9357 /* For an immediate, we always generate a 32-bit opcode;
9358 section relaxation will shrink it later if possible. */
9359 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9360 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9361 inst
.instruction
|= Rd
<< 8;
9362 inst
.instruction
|= Rs
<< 16;
9363 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9369 /* See if we can do this with a 16-bit instruction. */
9370 if (THUMB_SETS_FLAGS (inst
.instruction
))
9371 narrow
= !in_it_block ();
9373 narrow
= in_it_block ();
9375 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
9377 if (inst
.operands
[2].shifted
)
9379 if (inst
.size_req
== 4)
9385 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9386 inst
.instruction
|= Rd
;
9387 inst
.instruction
|= Rn
<< 3;
9391 /* If we get here, it can't be done in 16 bits. */
9392 constraint (inst
.operands
[2].shifted
9393 && inst
.operands
[2].immisreg
,
9394 _("shift must be constant"));
9395 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9396 inst
.instruction
|= Rd
<< 8;
9397 inst
.instruction
|= Rs
<< 16;
9398 encode_thumb32_shifted_operand (2);
9403 /* On its face this is a lie - the instruction does set the
9404 flags. However, the only supported mnemonic in this mode
9406 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9408 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
9409 _("unshifted register required"));
9410 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
9411 constraint (Rd
!= Rs
,
9412 _("dest and source1 must be the same register"));
9414 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9415 inst
.instruction
|= Rd
;
9416 inst
.instruction
|= Rn
<< 3;
9420 /* Similarly, but for instructions where the arithmetic operation is
9421 commutative, so we can allow either of them to be different from
9422 the destination operand in a 16-bit instruction. For instance, all
9423 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9430 Rd
= inst
.operands
[0].reg
;
9431 Rs
= (inst
.operands
[1].present
9432 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9433 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9434 Rn
= inst
.operands
[2].reg
;
9436 reject_bad_reg (Rd
);
9437 reject_bad_reg (Rs
);
9438 if (inst
.operands
[2].isreg
)
9439 reject_bad_reg (Rn
);
9443 if (!inst
.operands
[2].isreg
)
9445 /* For an immediate, we always generate a 32-bit opcode;
9446 section relaxation will shrink it later if possible. */
9447 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9448 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9449 inst
.instruction
|= Rd
<< 8;
9450 inst
.instruction
|= Rs
<< 16;
9451 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9457 /* See if we can do this with a 16-bit instruction. */
9458 if (THUMB_SETS_FLAGS (inst
.instruction
))
9459 narrow
= !in_it_block ();
9461 narrow
= in_it_block ();
9463 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
9465 if (inst
.operands
[2].shifted
)
9467 if (inst
.size_req
== 4)
9474 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9475 inst
.instruction
|= Rd
;
9476 inst
.instruction
|= Rn
<< 3;
9481 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9482 inst
.instruction
|= Rd
;
9483 inst
.instruction
|= Rs
<< 3;
9488 /* If we get here, it can't be done in 16 bits. */
9489 constraint (inst
.operands
[2].shifted
9490 && inst
.operands
[2].immisreg
,
9491 _("shift must be constant"));
9492 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9493 inst
.instruction
|= Rd
<< 8;
9494 inst
.instruction
|= Rs
<< 16;
9495 encode_thumb32_shifted_operand (2);
9500 /* On its face this is a lie - the instruction does set the
9501 flags. However, the only supported mnemonic in this mode
9503 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9505 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
9506 _("unshifted register required"));
9507 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
9509 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9510 inst
.instruction
|= Rd
;
9513 inst
.instruction
|= Rn
<< 3;
9515 inst
.instruction
|= Rs
<< 3;
9517 constraint (1, _("dest must overlap one source register"));
9524 if (inst
.operands
[0].present
)
9526 constraint ((inst
.instruction
& 0xf0) != 0x40
9527 && inst
.operands
[0].imm
> 0xf
9528 && inst
.operands
[0].imm
< 0x0,
9529 _("bad barrier type"));
9530 inst
.instruction
|= inst
.operands
[0].imm
;
9533 inst
.instruction
|= 0xf;
9540 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
9541 constraint (msb
> 32, _("bit-field extends past end of register"));
9542 /* The instruction encoding stores the LSB and MSB,
9543 not the LSB and width. */
9544 Rd
= inst
.operands
[0].reg
;
9545 reject_bad_reg (Rd
);
9546 inst
.instruction
|= Rd
<< 8;
9547 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
9548 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
9549 inst
.instruction
|= msb
- 1;
9558 Rd
= inst
.operands
[0].reg
;
9559 reject_bad_reg (Rd
);
9561 /* #0 in second position is alternative syntax for bfc, which is
9562 the same instruction but with REG_PC in the Rm field. */
9563 if (!inst
.operands
[1].isreg
)
9567 Rn
= inst
.operands
[1].reg
;
9568 reject_bad_reg (Rn
);
9571 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
9572 constraint (msb
> 32, _("bit-field extends past end of register"));
9573 /* The instruction encoding stores the LSB and MSB,
9574 not the LSB and width. */
9575 inst
.instruction
|= Rd
<< 8;
9576 inst
.instruction
|= Rn
<< 16;
9577 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
9578 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
9579 inst
.instruction
|= msb
- 1;
9587 Rd
= inst
.operands
[0].reg
;
9588 Rn
= inst
.operands
[1].reg
;
9590 reject_bad_reg (Rd
);
9591 reject_bad_reg (Rn
);
9593 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
9594 _("bit-field extends past end of register"));
9595 inst
.instruction
|= Rd
<< 8;
9596 inst
.instruction
|= Rn
<< 16;
9597 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
9598 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
9599 inst
.instruction
|= inst
.operands
[3].imm
- 1;
9602 /* ARM V5 Thumb BLX (argument parse)
9603 BLX <target_addr> which is BLX(1)
9604 BLX <Rm> which is BLX(2)
9605 Unfortunately, there are two different opcodes for this mnemonic.
9606 So, the insns[].value is not used, and the code here zaps values
9607 into inst.instruction.
9609 ??? How to take advantage of the additional two bits of displacement
9610 available in Thumb32 mode? Need new relocation? */
9615 set_it_insn_type_last ();
9617 if (inst
.operands
[0].isreg
)
9619 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9620 /* We have a register, so this is BLX(2). */
9621 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
9625 /* No register. This must be BLX(1). */
9626 inst
.instruction
= 0xf000e800;
9627 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BLX
;
9628 inst
.reloc
.pc_rel
= 1;
9639 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
9643 /* Conditional branches inside IT blocks are encoded as unconditional
9650 if (cond
!= COND_ALWAYS
)
9651 opcode
= T_MNEM_bcond
;
9653 opcode
= inst
.instruction
;
9655 if (unified_syntax
&& inst
.size_req
== 4)
9657 inst
.instruction
= THUMB_OP32(opcode
);
9658 if (cond
== COND_ALWAYS
)
9659 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
9662 gas_assert (cond
!= 0xF);
9663 inst
.instruction
|= cond
<< 22;
9664 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
9669 inst
.instruction
= THUMB_OP16(opcode
);
9670 if (cond
== COND_ALWAYS
)
9671 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
9674 inst
.instruction
|= cond
<< 8;
9675 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
9677 /* Allow section relaxation. */
9678 if (unified_syntax
&& inst
.size_req
!= 2)
9679 inst
.relax
= opcode
;
9682 inst
.reloc
.pc_rel
= 1;
9688 constraint (inst
.cond
!= COND_ALWAYS
,
9689 _("instruction is always unconditional"));
9690 if (inst
.operands
[0].present
)
9692 constraint (inst
.operands
[0].imm
> 255,
9693 _("immediate value out of range"));
9694 inst
.instruction
|= inst
.operands
[0].imm
;
9695 set_it_insn_type (NEUTRAL_IT_INSN
);
9700 do_t_branch23 (void)
9702 set_it_insn_type_last ();
9703 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
9704 inst
.reloc
.pc_rel
= 1;
9706 #if defined(OBJ_COFF)
9707 /* If the destination of the branch is a defined symbol which does not have
9708 the THUMB_FUNC attribute, then we must be calling a function which has
9709 the (interfacearm) attribute. We look for the Thumb entry point to that
9710 function and change the branch to refer to that function instead. */
9711 if ( inst
.reloc
.exp
.X_op
== O_symbol
9712 && inst
.reloc
.exp
.X_add_symbol
!= NULL
9713 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
9714 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
9715 inst
.reloc
.exp
.X_add_symbol
=
9716 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
9723 set_it_insn_type_last ();
9724 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
9725 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9726 should cause the alignment to be checked once it is known. This is
9727 because BX PC only works if the instruction is word aligned. */
9735 set_it_insn_type_last ();
9736 Rm
= inst
.operands
[0].reg
;
9737 reject_bad_reg (Rm
);
9738 inst
.instruction
|= Rm
<< 16;
9747 Rd
= inst
.operands
[0].reg
;
9748 Rm
= inst
.operands
[1].reg
;
9750 reject_bad_reg (Rd
);
9751 reject_bad_reg (Rm
);
9753 inst
.instruction
|= Rd
<< 8;
9754 inst
.instruction
|= Rm
<< 16;
9755 inst
.instruction
|= Rm
;
9761 set_it_insn_type (OUTSIDE_IT_INSN
);
9762 inst
.instruction
|= inst
.operands
[0].imm
;
9768 set_it_insn_type (OUTSIDE_IT_INSN
);
9770 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
9771 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
9773 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
9774 inst
.instruction
= 0xf3af8000;
9775 inst
.instruction
|= imod
<< 9;
9776 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
9777 if (inst
.operands
[1].present
)
9778 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
9782 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
9783 && (inst
.operands
[0].imm
& 4),
9784 _("selected processor does not support 'A' form "
9785 "of this instruction"));
9786 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
9787 _("Thumb does not support the 2-argument "
9788 "form of this instruction"));
9789 inst
.instruction
|= inst
.operands
[0].imm
;
9793 /* THUMB CPY instruction (argument parse). */
9798 if (inst
.size_req
== 4)
9800 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
9801 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9802 inst
.instruction
|= inst
.operands
[1].reg
;
9806 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9807 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9808 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9815 set_it_insn_type (OUTSIDE_IT_INSN
);
9816 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
9817 inst
.instruction
|= inst
.operands
[0].reg
;
9818 inst
.reloc
.pc_rel
= 1;
9819 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
9825 inst
.instruction
|= inst
.operands
[0].imm
;
9831 unsigned Rd
, Rn
, Rm
;
9833 Rd
= inst
.operands
[0].reg
;
9834 Rn
= (inst
.operands
[1].present
9835 ? inst
.operands
[1].reg
: Rd
);
9836 Rm
= inst
.operands
[2].reg
;
9838 reject_bad_reg (Rd
);
9839 reject_bad_reg (Rn
);
9840 reject_bad_reg (Rm
);
9842 inst
.instruction
|= Rd
<< 8;
9843 inst
.instruction
|= Rn
<< 16;
9844 inst
.instruction
|= Rm
;
9850 if (unified_syntax
&& inst
.size_req
== 4)
9851 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9853 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9859 unsigned int cond
= inst
.operands
[0].imm
;
9861 set_it_insn_type (IT_INSN
);
9862 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
9865 /* If the condition is a negative condition, invert the mask. */
9866 if ((cond
& 0x1) == 0x0)
9868 unsigned int mask
= inst
.instruction
& 0x000f;
9870 if ((mask
& 0x7) == 0)
9871 /* no conversion needed */;
9872 else if ((mask
& 0x3) == 0)
9874 else if ((mask
& 0x1) == 0)
9879 inst
.instruction
&= 0xfff0;
9880 inst
.instruction
|= mask
;
9883 inst
.instruction
|= cond
<< 4;
9886 /* Helper function used for both push/pop and ldm/stm. */
9888 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
9892 load
= (inst
.instruction
& (1 << 20)) != 0;
9894 if (mask
& (1 << 13))
9895 inst
.error
= _("SP not allowed in register list");
9897 if ((mask
& (1 << base
)) != 0
9899 inst
.error
= _("having the base register in the register list when "
9900 "using write back is UNPREDICTABLE");
9904 if (mask
& (1 << 15))
9906 if (mask
& (1 << 14))
9907 inst
.error
= _("LR and PC should not both be in register list");
9909 set_it_insn_type_last ();
9914 if (mask
& (1 << 15))
9915 inst
.error
= _("PC not allowed in register list");
9918 if ((mask
& (mask
- 1)) == 0)
9920 /* Single register transfers implemented as str/ldr. */
9923 if (inst
.instruction
& (1 << 23))
9924 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
9926 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
9930 if (inst
.instruction
& (1 << 23))
9931 inst
.instruction
= 0x00800000; /* ia -> [base] */
9933 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
9936 inst
.instruction
|= 0xf8400000;
9938 inst
.instruction
|= 0x00100000;
9940 mask
= ffs (mask
) - 1;
9944 inst
.instruction
|= WRITE_BACK
;
9946 inst
.instruction
|= mask
;
9947 inst
.instruction
|= base
<< 16;
9953 /* This really doesn't seem worth it. */
9954 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
9955 _("expression too complex"));
9956 constraint (inst
.operands
[1].writeback
,
9957 _("Thumb load/store multiple does not support {reglist}^"));
9965 /* See if we can use a 16-bit instruction. */
9966 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
9967 && inst
.size_req
!= 4
9968 && !(inst
.operands
[1].imm
& ~0xff))
9970 mask
= 1 << inst
.operands
[0].reg
;
9972 if (inst
.operands
[0].reg
<= 7)
9974 if (inst
.instruction
== T_MNEM_stmia
9975 ? inst
.operands
[0].writeback
9976 : (inst
.operands
[0].writeback
9977 == !(inst
.operands
[1].imm
& mask
)))
9979 if (inst
.instruction
== T_MNEM_stmia
9980 && (inst
.operands
[1].imm
& mask
)
9981 && (inst
.operands
[1].imm
& (mask
- 1)))
9982 as_warn (_("value stored for r%d is UNKNOWN"),
9983 inst
.operands
[0].reg
);
9985 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9986 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9987 inst
.instruction
|= inst
.operands
[1].imm
;
9990 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
9992 /* This means 1 register in reg list one of 3 situations:
9993 1. Instruction is stmia, but without writeback.
9994 2. lmdia without writeback, but with Rn not in
9996 3. ldmia with writeback, but with Rn in reglist.
9997 Case 3 is UNPREDICTABLE behaviour, so we handle
9998 case 1 and 2 which can be converted into a 16-bit
9999 str or ldr. The SP cases are handled below. */
10000 unsigned long opcode
;
10001 /* First, record an error for Case 3. */
10002 if (inst
.operands
[1].imm
& mask
10003 && inst
.operands
[0].writeback
)
10005 _("having the base register in the register list when "
10006 "using write back is UNPREDICTABLE");
10008 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
10010 inst
.instruction
= THUMB_OP16 (opcode
);
10011 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
10012 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
10016 else if (inst
.operands
[0] .reg
== REG_SP
)
10018 if (inst
.operands
[0].writeback
)
10021 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
10022 ? T_MNEM_push
: T_MNEM_pop
);
10023 inst
.instruction
|= inst
.operands
[1].imm
;
10026 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
10029 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
10030 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
10031 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
10039 if (inst
.instruction
< 0xffff)
10040 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10042 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
10043 inst
.operands
[0].writeback
);
10048 constraint (inst
.operands
[0].reg
> 7
10049 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
10050 constraint (inst
.instruction
!= T_MNEM_ldmia
10051 && inst
.instruction
!= T_MNEM_stmia
,
10052 _("Thumb-2 instruction only valid in unified syntax"));
10053 if (inst
.instruction
== T_MNEM_stmia
)
10055 if (!inst
.operands
[0].writeback
)
10056 as_warn (_("this instruction will write back the base register"));
10057 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
10058 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
10059 as_warn (_("value stored for r%d is UNKNOWN"),
10060 inst
.operands
[0].reg
);
10064 if (!inst
.operands
[0].writeback
10065 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
10066 as_warn (_("this instruction will write back the base register"));
10067 else if (inst
.operands
[0].writeback
10068 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
10069 as_warn (_("this instruction will not write back the base register"));
10072 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10073 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10074 inst
.instruction
|= inst
.operands
[1].imm
;
10081 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
10082 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
10083 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
10084 || inst
.operands
[1].negative
,
10087 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
10089 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10090 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10091 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
10097 if (!inst
.operands
[1].present
)
10099 constraint (inst
.operands
[0].reg
== REG_LR
,
10100 _("r14 not allowed as first register "
10101 "when second register is omitted"));
10102 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
10104 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
10107 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10108 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
10109 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10115 unsigned long opcode
;
10118 if (inst
.operands
[0].isreg
10119 && !inst
.operands
[0].preind
10120 && inst
.operands
[0].reg
== REG_PC
)
10121 set_it_insn_type_last ();
10123 opcode
= inst
.instruction
;
10124 if (unified_syntax
)
10126 if (!inst
.operands
[1].isreg
)
10128 if (opcode
<= 0xffff)
10129 inst
.instruction
= THUMB_OP32 (opcode
);
10130 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
10133 if (inst
.operands
[1].isreg
10134 && !inst
.operands
[1].writeback
10135 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
10136 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
10137 && opcode
<= 0xffff
10138 && inst
.size_req
!= 4)
10140 /* Insn may have a 16-bit form. */
10141 Rn
= inst
.operands
[1].reg
;
10142 if (inst
.operands
[1].immisreg
)
10144 inst
.instruction
= THUMB_OP16 (opcode
);
10146 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
10148 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
10149 reject_bad_reg (inst
.operands
[1].imm
);
10151 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
10152 && opcode
!= T_MNEM_ldrsb
)
10153 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
10154 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
10161 if (inst
.reloc
.pc_rel
)
10162 opcode
= T_MNEM_ldr_pc2
;
10164 opcode
= T_MNEM_ldr_pc
;
10168 if (opcode
== T_MNEM_ldr
)
10169 opcode
= T_MNEM_ldr_sp
;
10171 opcode
= T_MNEM_str_sp
;
10173 inst
.instruction
= inst
.operands
[0].reg
<< 8;
10177 inst
.instruction
= inst
.operands
[0].reg
;
10178 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10180 inst
.instruction
|= THUMB_OP16 (opcode
);
10181 if (inst
.size_req
== 2)
10182 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
10184 inst
.relax
= opcode
;
10188 /* Definitely a 32-bit variant. */
10190 /* Do some validations regarding addressing modes. */
10191 if (inst
.operands
[1].immisreg
&& opcode
!= T_MNEM_ldr
10192 && opcode
!= T_MNEM_str
)
10193 reject_bad_reg (inst
.operands
[1].imm
);
10195 inst
.instruction
= THUMB_OP32 (opcode
);
10196 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10197 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
10201 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
10203 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
10205 /* Only [Rn,Rm] is acceptable. */
10206 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
10207 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
10208 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
10209 || inst
.operands
[1].negative
,
10210 _("Thumb does not support this addressing mode"));
10211 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10215 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10216 if (!inst
.operands
[1].isreg
)
10217 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
10220 constraint (!inst
.operands
[1].preind
10221 || inst
.operands
[1].shifted
10222 || inst
.operands
[1].writeback
,
10223 _("Thumb does not support this addressing mode"));
10224 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
10226 constraint (inst
.instruction
& 0x0600,
10227 _("byte or halfword not valid for base register"));
10228 constraint (inst
.operands
[1].reg
== REG_PC
10229 && !(inst
.instruction
& THUMB_LOAD_BIT
),
10230 _("r15 based store not allowed"));
10231 constraint (inst
.operands
[1].immisreg
,
10232 _("invalid base register for register offset"));
10234 if (inst
.operands
[1].reg
== REG_PC
)
10235 inst
.instruction
= T_OPCODE_LDR_PC
;
10236 else if (inst
.instruction
& THUMB_LOAD_BIT
)
10237 inst
.instruction
= T_OPCODE_LDR_SP
;
10239 inst
.instruction
= T_OPCODE_STR_SP
;
10241 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10242 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
10246 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
10247 if (!inst
.operands
[1].immisreg
)
10249 /* Immediate offset. */
10250 inst
.instruction
|= inst
.operands
[0].reg
;
10251 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10252 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
10256 /* Register offset. */
10257 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
10258 constraint (inst
.operands
[1].negative
,
10259 _("Thumb does not support this addressing mode"));
10262 switch (inst
.instruction
)
10264 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
10265 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
10266 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
10267 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
10268 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
10269 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
10270 case 0x5600 /* ldrsb */:
10271 case 0x5e00 /* ldrsh */: break;
10275 inst
.instruction
|= inst
.operands
[0].reg
;
10276 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10277 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
10283 if (!inst
.operands
[1].present
)
10285 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
10286 constraint (inst
.operands
[0].reg
== REG_LR
,
10287 _("r14 not allowed here"));
10289 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10290 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
10291 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
10297 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10298 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
10304 unsigned Rd
, Rn
, Rm
, Ra
;
10306 Rd
= inst
.operands
[0].reg
;
10307 Rn
= inst
.operands
[1].reg
;
10308 Rm
= inst
.operands
[2].reg
;
10309 Ra
= inst
.operands
[3].reg
;
10311 reject_bad_reg (Rd
);
10312 reject_bad_reg (Rn
);
10313 reject_bad_reg (Rm
);
10314 reject_bad_reg (Ra
);
10316 inst
.instruction
|= Rd
<< 8;
10317 inst
.instruction
|= Rn
<< 16;
10318 inst
.instruction
|= Rm
;
10319 inst
.instruction
|= Ra
<< 12;
10325 unsigned RdLo
, RdHi
, Rn
, Rm
;
10327 RdLo
= inst
.operands
[0].reg
;
10328 RdHi
= inst
.operands
[1].reg
;
10329 Rn
= inst
.operands
[2].reg
;
10330 Rm
= inst
.operands
[3].reg
;
10332 reject_bad_reg (RdLo
);
10333 reject_bad_reg (RdHi
);
10334 reject_bad_reg (Rn
);
10335 reject_bad_reg (Rm
);
10337 inst
.instruction
|= RdLo
<< 12;
10338 inst
.instruction
|= RdHi
<< 8;
10339 inst
.instruction
|= Rn
<< 16;
10340 inst
.instruction
|= Rm
;
10344 do_t_mov_cmp (void)
10348 Rn
= inst
.operands
[0].reg
;
10349 Rm
= inst
.operands
[1].reg
;
10352 set_it_insn_type_last ();
10354 if (unified_syntax
)
10356 int r0off
= (inst
.instruction
== T_MNEM_mov
10357 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
10358 unsigned long opcode
;
10359 bfd_boolean narrow
;
10360 bfd_boolean low_regs
;
10362 low_regs
= (Rn
<= 7 && Rm
<= 7);
10363 opcode
= inst
.instruction
;
10364 if (in_it_block ())
10365 narrow
= opcode
!= T_MNEM_movs
;
10367 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
10368 if (inst
.size_req
== 4
10369 || inst
.operands
[1].shifted
)
10372 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
10373 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
10374 && !inst
.operands
[1].shifted
10378 inst
.instruction
= T2_SUBS_PC_LR
;
10382 if (opcode
== T_MNEM_cmp
)
10384 constraint (Rn
== REG_PC
, BAD_PC
);
10387 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10389 warn_deprecated_sp (Rm
);
10390 /* R15 was documented as a valid choice for Rm in ARMv6,
10391 but as UNPREDICTABLE in ARMv7. ARM's proprietary
10392 tools reject R15, so we do too. */
10393 constraint (Rm
== REG_PC
, BAD_PC
);
10396 reject_bad_reg (Rm
);
10398 else if (opcode
== T_MNEM_mov
10399 || opcode
== T_MNEM_movs
)
10401 if (inst
.operands
[1].isreg
)
10403 if (opcode
== T_MNEM_movs
)
10405 reject_bad_reg (Rn
);
10406 reject_bad_reg (Rm
);
10410 /* This is mov.n. */
10411 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
10412 && (Rm
== REG_SP
|| Rm
== REG_PC
))
10414 as_warn (_("Use of r%u as a source register is "
10415 "deprecated when r%u is the destination "
10416 "register."), Rm
, Rn
);
10421 /* This is mov.w. */
10422 constraint (Rn
== REG_PC
, BAD_PC
);
10423 constraint (Rm
== REG_PC
, BAD_PC
);
10424 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
10428 reject_bad_reg (Rn
);
10431 if (!inst
.operands
[1].isreg
)
10433 /* Immediate operand. */
10434 if (!in_it_block () && opcode
== T_MNEM_mov
)
10436 if (low_regs
&& narrow
)
10438 inst
.instruction
= THUMB_OP16 (opcode
);
10439 inst
.instruction
|= Rn
<< 8;
10440 if (inst
.size_req
== 2)
10441 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
10443 inst
.relax
= opcode
;
10447 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10448 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10449 inst
.instruction
|= Rn
<< r0off
;
10450 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10453 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
10454 && (inst
.instruction
== T_MNEM_mov
10455 || inst
.instruction
== T_MNEM_movs
))
10457 /* Register shifts are encoded as separate shift instructions. */
10458 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
10460 if (in_it_block ())
10465 if (inst
.size_req
== 4)
10468 if (!low_regs
|| inst
.operands
[1].imm
> 7)
10474 switch (inst
.operands
[1].shift_kind
)
10477 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
10480 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
10483 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
10486 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
10492 inst
.instruction
= opcode
;
10495 inst
.instruction
|= Rn
;
10496 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
10501 inst
.instruction
|= CONDS_BIT
;
10503 inst
.instruction
|= Rn
<< 8;
10504 inst
.instruction
|= Rm
<< 16;
10505 inst
.instruction
|= inst
.operands
[1].imm
;
10510 /* Some mov with immediate shift have narrow variants.
10511 Register shifts are handled above. */
10512 if (low_regs
&& inst
.operands
[1].shifted
10513 && (inst
.instruction
== T_MNEM_mov
10514 || inst
.instruction
== T_MNEM_movs
))
10516 if (in_it_block ())
10517 narrow
= (inst
.instruction
== T_MNEM_mov
);
10519 narrow
= (inst
.instruction
== T_MNEM_movs
);
10524 switch (inst
.operands
[1].shift_kind
)
10526 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
10527 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
10528 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
10529 default: narrow
= FALSE
; break;
10535 inst
.instruction
|= Rn
;
10536 inst
.instruction
|= Rm
<< 3;
10537 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
10541 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10542 inst
.instruction
|= Rn
<< r0off
;
10543 encode_thumb32_shifted_operand (1);
10547 switch (inst
.instruction
)
10550 inst
.instruction
= T_OPCODE_MOV_HR
;
10551 inst
.instruction
|= (Rn
& 0x8) << 4;
10552 inst
.instruction
|= (Rn
& 0x7);
10553 inst
.instruction
|= Rm
<< 3;
10557 /* We know we have low registers at this point.
10558 Generate LSLS Rd, Rs, #0. */
10559 inst
.instruction
= T_OPCODE_LSL_I
;
10560 inst
.instruction
|= Rn
;
10561 inst
.instruction
|= Rm
<< 3;
10567 inst
.instruction
= T_OPCODE_CMP_LR
;
10568 inst
.instruction
|= Rn
;
10569 inst
.instruction
|= Rm
<< 3;
10573 inst
.instruction
= T_OPCODE_CMP_HR
;
10574 inst
.instruction
|= (Rn
& 0x8) << 4;
10575 inst
.instruction
|= (Rn
& 0x7);
10576 inst
.instruction
|= Rm
<< 3;
10583 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10585 /* PR 10443: Do not silently ignore shifted operands. */
10586 constraint (inst
.operands
[1].shifted
,
10587 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
10589 if (inst
.operands
[1].isreg
)
10591 if (Rn
< 8 && Rm
< 8)
10593 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
10594 since a MOV instruction produces unpredictable results. */
10595 if (inst
.instruction
== T_OPCODE_MOV_I8
)
10596 inst
.instruction
= T_OPCODE_ADD_I3
;
10598 inst
.instruction
= T_OPCODE_CMP_LR
;
10600 inst
.instruction
|= Rn
;
10601 inst
.instruction
|= Rm
<< 3;
10605 if (inst
.instruction
== T_OPCODE_MOV_I8
)
10606 inst
.instruction
= T_OPCODE_MOV_HR
;
10608 inst
.instruction
= T_OPCODE_CMP_HR
;
10614 constraint (Rn
> 7,
10615 _("only lo regs allowed with immediate"));
10616 inst
.instruction
|= Rn
<< 8;
10617 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
10628 top
= (inst
.instruction
& 0x00800000) != 0;
10629 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
10631 constraint (top
, _(":lower16: not allowed this instruction"));
10632 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
10634 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
10636 constraint (!top
, _(":upper16: not allowed this instruction"));
10637 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
10640 Rd
= inst
.operands
[0].reg
;
10641 reject_bad_reg (Rd
);
10643 inst
.instruction
|= Rd
<< 8;
10644 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
10646 imm
= inst
.reloc
.exp
.X_add_number
;
10647 inst
.instruction
|= (imm
& 0xf000) << 4;
10648 inst
.instruction
|= (imm
& 0x0800) << 15;
10649 inst
.instruction
|= (imm
& 0x0700) << 4;
10650 inst
.instruction
|= (imm
& 0x00ff);
10655 do_t_mvn_tst (void)
10659 Rn
= inst
.operands
[0].reg
;
10660 Rm
= inst
.operands
[1].reg
;
10662 if (inst
.instruction
== T_MNEM_cmp
10663 || inst
.instruction
== T_MNEM_cmn
)
10664 constraint (Rn
== REG_PC
, BAD_PC
);
10666 reject_bad_reg (Rn
);
10667 reject_bad_reg (Rm
);
10669 if (unified_syntax
)
10671 int r0off
= (inst
.instruction
== T_MNEM_mvn
10672 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
10673 bfd_boolean narrow
;
10675 if (inst
.size_req
== 4
10676 || inst
.instruction
> 0xffff
10677 || inst
.operands
[1].shifted
10678 || Rn
> 7 || Rm
> 7)
10680 else if (inst
.instruction
== T_MNEM_cmn
)
10682 else if (THUMB_SETS_FLAGS (inst
.instruction
))
10683 narrow
= !in_it_block ();
10685 narrow
= in_it_block ();
10687 if (!inst
.operands
[1].isreg
)
10689 /* For an immediate, we always generate a 32-bit opcode;
10690 section relaxation will shrink it later if possible. */
10691 if (inst
.instruction
< 0xffff)
10692 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10693 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10694 inst
.instruction
|= Rn
<< r0off
;
10695 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10699 /* See if we can do this with a 16-bit instruction. */
10702 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10703 inst
.instruction
|= Rn
;
10704 inst
.instruction
|= Rm
<< 3;
10708 constraint (inst
.operands
[1].shifted
10709 && inst
.operands
[1].immisreg
,
10710 _("shift must be constant"));
10711 if (inst
.instruction
< 0xffff)
10712 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10713 inst
.instruction
|= Rn
<< r0off
;
10714 encode_thumb32_shifted_operand (1);
10720 constraint (inst
.instruction
> 0xffff
10721 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
10722 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
10723 _("unshifted register required"));
10724 constraint (Rn
> 7 || Rm
> 7,
10727 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10728 inst
.instruction
|= Rn
;
10729 inst
.instruction
|= Rm
<< 3;
10739 if (do_vfp_nsyn_mrs () == SUCCESS
)
10742 flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
10745 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
),
10746 _("selected processor does not support "
10747 "requested special purpose register"));
10751 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
10752 _("selected processor does not support "
10753 "requested special purpose register"));
10754 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
10755 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
10756 _("'CPSR' or 'SPSR' expected"));
10759 Rd
= inst
.operands
[0].reg
;
10760 reject_bad_reg (Rd
);
10762 inst
.instruction
|= Rd
<< 8;
10763 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
10764 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
10773 if (do_vfp_nsyn_msr () == SUCCESS
)
10776 constraint (!inst
.operands
[1].isreg
,
10777 _("Thumb encoding does not support an immediate here"));
10778 flags
= inst
.operands
[0].imm
;
10781 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
10782 _("selected processor does not support "
10783 "requested special purpose register"));
10787 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
),
10788 _("selected processor does not support "
10789 "requested special purpose register"));
10793 Rn
= inst
.operands
[1].reg
;
10794 reject_bad_reg (Rn
);
10796 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
10797 inst
.instruction
|= (flags
& ~SPSR_BIT
) >> 8;
10798 inst
.instruction
|= (flags
& 0xff);
10799 inst
.instruction
|= Rn
<< 16;
10805 bfd_boolean narrow
;
10806 unsigned Rd
, Rn
, Rm
;
10808 if (!inst
.operands
[2].present
)
10809 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
10811 Rd
= inst
.operands
[0].reg
;
10812 Rn
= inst
.operands
[1].reg
;
10813 Rm
= inst
.operands
[2].reg
;
10815 if (unified_syntax
)
10817 if (inst
.size_req
== 4
10823 else if (inst
.instruction
== T_MNEM_muls
)
10824 narrow
= !in_it_block ();
10826 narrow
= in_it_block ();
10830 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
10831 constraint (Rn
> 7 || Rm
> 7,
10838 /* 16-bit MULS/Conditional MUL. */
10839 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10840 inst
.instruction
|= Rd
;
10843 inst
.instruction
|= Rm
<< 3;
10845 inst
.instruction
|= Rn
<< 3;
10847 constraint (1, _("dest must overlap one source register"));
10851 constraint (inst
.instruction
!= T_MNEM_mul
,
10852 _("Thumb-2 MUL must not set flags"));
10854 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10855 inst
.instruction
|= Rd
<< 8;
10856 inst
.instruction
|= Rn
<< 16;
10857 inst
.instruction
|= Rm
<< 0;
10859 reject_bad_reg (Rd
);
10860 reject_bad_reg (Rn
);
10861 reject_bad_reg (Rm
);
10868 unsigned RdLo
, RdHi
, Rn
, Rm
;
10870 RdLo
= inst
.operands
[0].reg
;
10871 RdHi
= inst
.operands
[1].reg
;
10872 Rn
= inst
.operands
[2].reg
;
10873 Rm
= inst
.operands
[3].reg
;
10875 reject_bad_reg (RdLo
);
10876 reject_bad_reg (RdHi
);
10877 reject_bad_reg (Rn
);
10878 reject_bad_reg (Rm
);
10880 inst
.instruction
|= RdLo
<< 12;
10881 inst
.instruction
|= RdHi
<< 8;
10882 inst
.instruction
|= Rn
<< 16;
10883 inst
.instruction
|= Rm
;
10886 as_tsktsk (_("rdhi and rdlo must be different"));
10892 set_it_insn_type (NEUTRAL_IT_INSN
);
10894 if (unified_syntax
)
10896 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
10898 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10899 inst
.instruction
|= inst
.operands
[0].imm
;
10903 /* PR9722: Check for Thumb2 availability before
10904 generating a thumb2 nop instruction. */
10905 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
10907 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10908 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
10911 inst
.instruction
= 0x46c0;
10916 constraint (inst
.operands
[0].present
,
10917 _("Thumb does not support NOP with hints"));
10918 inst
.instruction
= 0x46c0;
10925 if (unified_syntax
)
10927 bfd_boolean narrow
;
10929 if (THUMB_SETS_FLAGS (inst
.instruction
))
10930 narrow
= !in_it_block ();
10932 narrow
= in_it_block ();
10933 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
10935 if (inst
.size_req
== 4)
10940 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10941 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10942 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10946 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10947 inst
.instruction
|= inst
.operands
[0].reg
;
10948 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10953 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
10955 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10957 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10958 inst
.instruction
|= inst
.operands
[0].reg
;
10959 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10968 Rd
= inst
.operands
[0].reg
;
10969 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
10971 reject_bad_reg (Rd
);
10972 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
10973 reject_bad_reg (Rn
);
10975 inst
.instruction
|= Rd
<< 8;
10976 inst
.instruction
|= Rn
<< 16;
10978 if (!inst
.operands
[2].isreg
)
10980 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10981 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10987 Rm
= inst
.operands
[2].reg
;
10988 reject_bad_reg (Rm
);
10990 constraint (inst
.operands
[2].shifted
10991 && inst
.operands
[2].immisreg
,
10992 _("shift must be constant"));
10993 encode_thumb32_shifted_operand (2);
11000 unsigned Rd
, Rn
, Rm
;
11002 Rd
= inst
.operands
[0].reg
;
11003 Rn
= inst
.operands
[1].reg
;
11004 Rm
= inst
.operands
[2].reg
;
11006 reject_bad_reg (Rd
);
11007 reject_bad_reg (Rn
);
11008 reject_bad_reg (Rm
);
11010 inst
.instruction
|= Rd
<< 8;
11011 inst
.instruction
|= Rn
<< 16;
11012 inst
.instruction
|= Rm
;
11013 if (inst
.operands
[3].present
)
11015 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
11016 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
11017 _("expression too complex"));
11018 inst
.instruction
|= (val
& 0x1c) << 10;
11019 inst
.instruction
|= (val
& 0x03) << 6;
11026 if (!inst
.operands
[3].present
)
11030 inst
.instruction
&= ~0x00000020;
11032 /* PR 10168. Swap the Rm and Rn registers. */
11033 Rtmp
= inst
.operands
[1].reg
;
11034 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
11035 inst
.operands
[2].reg
= Rtmp
;
11043 if (inst
.operands
[0].immisreg
)
11044 reject_bad_reg (inst
.operands
[0].imm
);
11046 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11050 do_t_push_pop (void)
11054 constraint (inst
.operands
[0].writeback
,
11055 _("push/pop do not support {reglist}^"));
11056 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
11057 _("expression too complex"));
11059 mask
= inst
.operands
[0].imm
;
11060 if ((mask
& ~0xff) == 0)
11061 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
11062 else if ((inst
.instruction
== T_MNEM_push
11063 && (mask
& ~0xff) == 1 << REG_LR
)
11064 || (inst
.instruction
== T_MNEM_pop
11065 && (mask
& ~0xff) == 1 << REG_PC
))
11067 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11068 inst
.instruction
|= THUMB_PP_PC_LR
;
11069 inst
.instruction
|= mask
& 0xff;
11071 else if (unified_syntax
)
11073 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11074 encode_thumb2_ldmstm (13, mask
, TRUE
);
11078 inst
.error
= _("invalid register list to push/pop instruction");
11088 Rd
= inst
.operands
[0].reg
;
11089 Rm
= inst
.operands
[1].reg
;
11091 reject_bad_reg (Rd
);
11092 reject_bad_reg (Rm
);
11094 inst
.instruction
|= Rd
<< 8;
11095 inst
.instruction
|= Rm
<< 16;
11096 inst
.instruction
|= Rm
;
11104 Rd
= inst
.operands
[0].reg
;
11105 Rm
= inst
.operands
[1].reg
;
11107 reject_bad_reg (Rd
);
11108 reject_bad_reg (Rm
);
11110 if (Rd
<= 7 && Rm
<= 7
11111 && inst
.size_req
!= 4)
11113 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11114 inst
.instruction
|= Rd
;
11115 inst
.instruction
|= Rm
<< 3;
11117 else if (unified_syntax
)
11119 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11120 inst
.instruction
|= Rd
<< 8;
11121 inst
.instruction
|= Rm
<< 16;
11122 inst
.instruction
|= Rm
;
11125 inst
.error
= BAD_HIREG
;
11133 Rd
= inst
.operands
[0].reg
;
11134 Rm
= inst
.operands
[1].reg
;
11136 reject_bad_reg (Rd
);
11137 reject_bad_reg (Rm
);
11139 inst
.instruction
|= Rd
<< 8;
11140 inst
.instruction
|= Rm
;
11148 Rd
= inst
.operands
[0].reg
;
11149 Rs
= (inst
.operands
[1].present
11150 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11151 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11153 reject_bad_reg (Rd
);
11154 reject_bad_reg (Rs
);
11155 if (inst
.operands
[2].isreg
)
11156 reject_bad_reg (inst
.operands
[2].reg
);
11158 inst
.instruction
|= Rd
<< 8;
11159 inst
.instruction
|= Rs
<< 16;
11160 if (!inst
.operands
[2].isreg
)
11162 bfd_boolean narrow
;
11164 if ((inst
.instruction
& 0x00100000) != 0)
11165 narrow
= !in_it_block ();
11167 narrow
= in_it_block ();
11169 if (Rd
> 7 || Rs
> 7)
11172 if (inst
.size_req
== 4 || !unified_syntax
)
11175 if (inst
.reloc
.exp
.X_op
!= O_constant
11176 || inst
.reloc
.exp
.X_add_number
!= 0)
11179 /* Turn rsb #0 into 16-bit neg. We should probably do this via
11180 relaxation, but it doesn't seem worth the hassle. */
11183 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11184 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
11185 inst
.instruction
|= Rs
<< 3;
11186 inst
.instruction
|= Rd
;
11190 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11191 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11195 encode_thumb32_shifted_operand (2);
11201 set_it_insn_type (OUTSIDE_IT_INSN
);
11202 if (inst
.operands
[0].imm
)
11203 inst
.instruction
|= 0x8;
11209 if (!inst
.operands
[1].present
)
11210 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
11212 if (unified_syntax
)
11214 bfd_boolean narrow
;
11217 switch (inst
.instruction
)
11220 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
11222 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
11224 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
11226 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
11230 if (THUMB_SETS_FLAGS (inst
.instruction
))
11231 narrow
= !in_it_block ();
11233 narrow
= in_it_block ();
11234 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
11236 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
11238 if (inst
.operands
[2].isreg
11239 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
11240 || inst
.operands
[2].reg
> 7))
11242 if (inst
.size_req
== 4)
11245 reject_bad_reg (inst
.operands
[0].reg
);
11246 reject_bad_reg (inst
.operands
[1].reg
);
11250 if (inst
.operands
[2].isreg
)
11252 reject_bad_reg (inst
.operands
[2].reg
);
11253 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11254 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11255 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11256 inst
.instruction
|= inst
.operands
[2].reg
;
11260 inst
.operands
[1].shifted
= 1;
11261 inst
.operands
[1].shift_kind
= shift_kind
;
11262 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
11263 ? T_MNEM_movs
: T_MNEM_mov
);
11264 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11265 encode_thumb32_shifted_operand (1);
11266 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
11267 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11272 if (inst
.operands
[2].isreg
)
11274 switch (shift_kind
)
11276 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
11277 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
11278 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
11279 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
11283 inst
.instruction
|= inst
.operands
[0].reg
;
11284 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
11288 switch (shift_kind
)
11290 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11291 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11292 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11295 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11296 inst
.instruction
|= inst
.operands
[0].reg
;
11297 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11303 constraint (inst
.operands
[0].reg
> 7
11304 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
11305 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11307 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
11309 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
11310 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
11311 _("source1 and dest must be same register"));
11313 switch (inst
.instruction
)
11315 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
11316 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
11317 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
11318 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
11322 inst
.instruction
|= inst
.operands
[0].reg
;
11323 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
11327 switch (inst
.instruction
)
11329 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11330 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11331 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11332 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
11335 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11336 inst
.instruction
|= inst
.operands
[0].reg
;
11337 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11345 unsigned Rd
, Rn
, Rm
;
11347 Rd
= inst
.operands
[0].reg
;
11348 Rn
= inst
.operands
[1].reg
;
11349 Rm
= inst
.operands
[2].reg
;
11351 reject_bad_reg (Rd
);
11352 reject_bad_reg (Rn
);
11353 reject_bad_reg (Rm
);
11355 inst
.instruction
|= Rd
<< 8;
11356 inst
.instruction
|= Rn
<< 16;
11357 inst
.instruction
|= Rm
;
11363 unsigned Rd
, Rn
, Rm
;
11365 Rd
= inst
.operands
[0].reg
;
11366 Rm
= inst
.operands
[1].reg
;
11367 Rn
= inst
.operands
[2].reg
;
11369 reject_bad_reg (Rd
);
11370 reject_bad_reg (Rn
);
11371 reject_bad_reg (Rm
);
11373 inst
.instruction
|= Rd
<< 8;
11374 inst
.instruction
|= Rn
<< 16;
11375 inst
.instruction
|= Rm
;
11381 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
11382 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
11383 _("SMC is not permitted on this architecture"));
11384 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
11385 _("expression too complex"));
11386 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11387 inst
.instruction
|= (value
& 0xf000) >> 12;
11388 inst
.instruction
|= (value
& 0x0ff0);
11389 inst
.instruction
|= (value
& 0x000f) << 16;
11393 do_t_ssat_usat (int bias
)
11397 Rd
= inst
.operands
[0].reg
;
11398 Rn
= inst
.operands
[2].reg
;
11400 reject_bad_reg (Rd
);
11401 reject_bad_reg (Rn
);
11403 inst
.instruction
|= Rd
<< 8;
11404 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
11405 inst
.instruction
|= Rn
<< 16;
11407 if (inst
.operands
[3].present
)
11409 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
11411 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11413 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
11414 _("expression too complex"));
11416 if (shift_amount
!= 0)
11418 constraint (shift_amount
> 31,
11419 _("shift expression is too large"));
11421 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
11422 inst
.instruction
|= 0x00200000; /* sh bit. */
11424 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
11425 inst
.instruction
|= (shift_amount
& 0x03) << 6;
11433 do_t_ssat_usat (1);
11441 Rd
= inst
.operands
[0].reg
;
11442 Rn
= inst
.operands
[2].reg
;
11444 reject_bad_reg (Rd
);
11445 reject_bad_reg (Rn
);
11447 inst
.instruction
|= Rd
<< 8;
11448 inst
.instruction
|= inst
.operands
[1].imm
- 1;
11449 inst
.instruction
|= Rn
<< 16;
11455 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
11456 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
11457 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
11458 || inst
.operands
[2].negative
,
11461 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
11463 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11464 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11465 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11466 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11472 if (!inst
.operands
[2].present
)
11473 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
11475 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
11476 || inst
.operands
[0].reg
== inst
.operands
[2].reg
11477 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
11480 inst
.instruction
|= inst
.operands
[0].reg
;
11481 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11482 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
11483 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
11489 unsigned Rd
, Rn
, Rm
;
11491 Rd
= inst
.operands
[0].reg
;
11492 Rn
= inst
.operands
[1].reg
;
11493 Rm
= inst
.operands
[2].reg
;
11495 reject_bad_reg (Rd
);
11496 reject_bad_reg (Rn
);
11497 reject_bad_reg (Rm
);
11499 inst
.instruction
|= Rd
<< 8;
11500 inst
.instruction
|= Rn
<< 16;
11501 inst
.instruction
|= Rm
;
11502 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
11510 Rd
= inst
.operands
[0].reg
;
11511 Rm
= inst
.operands
[1].reg
;
11513 reject_bad_reg (Rd
);
11514 reject_bad_reg (Rm
);
11516 if (inst
.instruction
<= 0xffff
11517 && inst
.size_req
!= 4
11518 && Rd
<= 7 && Rm
<= 7
11519 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
11521 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11522 inst
.instruction
|= Rd
;
11523 inst
.instruction
|= Rm
<< 3;
11525 else if (unified_syntax
)
11527 if (inst
.instruction
<= 0xffff)
11528 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11529 inst
.instruction
|= Rd
<< 8;
11530 inst
.instruction
|= Rm
;
11531 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
11535 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
11536 _("Thumb encoding does not support rotation"));
11537 constraint (1, BAD_HIREG
);
11544 /* We have to do the following check manually as ARM_EXT_OS only applies
11546 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6m
))
11548 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_os
))
11549 as_bad (_("SVC is not permitted on this architecture"));
11550 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, arm_ext_os
);
11553 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
11562 half
= (inst
.instruction
& 0x10) != 0;
11563 set_it_insn_type_last ();
11564 constraint (inst
.operands
[0].immisreg
,
11565 _("instruction requires register index"));
11567 Rn
= inst
.operands
[0].reg
;
11568 Rm
= inst
.operands
[0].imm
;
11570 constraint (Rn
== REG_SP
, BAD_SP
);
11571 reject_bad_reg (Rm
);
11573 constraint (!half
&& inst
.operands
[0].shifted
,
11574 _("instruction does not allow shifted index"));
11575 inst
.instruction
|= (Rn
<< 16) | Rm
;
11581 do_t_ssat_usat (0);
11589 Rd
= inst
.operands
[0].reg
;
11590 Rn
= inst
.operands
[2].reg
;
11592 reject_bad_reg (Rd
);
11593 reject_bad_reg (Rn
);
11595 inst
.instruction
|= Rd
<< 8;
11596 inst
.instruction
|= inst
.operands
[1].imm
;
11597 inst
.instruction
|= Rn
<< 16;
11600 /* Neon instruction encoder helpers. */
11602 /* Encodings for the different types for various Neon opcodes. */
11604 /* An "invalid" code for the following tables. */
11607 struct neon_tab_entry
11610 unsigned float_or_poly
;
11611 unsigned scalar_or_imm
;
11614 /* Map overloaded Neon opcodes to their respective encodings. */
11615 #define NEON_ENC_TAB \
11616 X(vabd, 0x0000700, 0x1200d00, N_INV), \
11617 X(vmax, 0x0000600, 0x0000f00, N_INV), \
11618 X(vmin, 0x0000610, 0x0200f00, N_INV), \
11619 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
11620 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
11621 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
11622 X(vadd, 0x0000800, 0x0000d00, N_INV), \
11623 X(vsub, 0x1000800, 0x0200d00, N_INV), \
11624 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
11625 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
11626 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
11627 /* Register variants of the following two instructions are encoded as
11628 vcge / vcgt with the operands reversed. */ \
11629 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
11630 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
11631 X(vfma, N_INV, 0x0000c10, N_INV), \
11632 X(vfms, N_INV, 0x0200c10, N_INV), \
11633 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
11634 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
11635 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
11636 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
11637 X(vmlal, 0x0800800, N_INV, 0x0800240), \
11638 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
11639 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
11640 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
11641 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
11642 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
11643 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
11644 X(vshl, 0x0000400, N_INV, 0x0800510), \
11645 X(vqshl, 0x0000410, N_INV, 0x0800710), \
11646 X(vand, 0x0000110, N_INV, 0x0800030), \
11647 X(vbic, 0x0100110, N_INV, 0x0800030), \
11648 X(veor, 0x1000110, N_INV, N_INV), \
11649 X(vorn, 0x0300110, N_INV, 0x0800010), \
11650 X(vorr, 0x0200110, N_INV, 0x0800010), \
11651 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
11652 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
11653 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
11654 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
11655 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
11656 X(vst1, 0x0000000, 0x0800000, N_INV), \
11657 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
11658 X(vst2, 0x0000100, 0x0800100, N_INV), \
11659 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
11660 X(vst3, 0x0000200, 0x0800200, N_INV), \
11661 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
11662 X(vst4, 0x0000300, 0x0800300, N_INV), \
11663 X(vmovn, 0x1b20200, N_INV, N_INV), \
11664 X(vtrn, 0x1b20080, N_INV, N_INV), \
11665 X(vqmovn, 0x1b20200, N_INV, N_INV), \
11666 X(vqmovun, 0x1b20240, N_INV, N_INV), \
11667 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
11668 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
11669 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
11670 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
11671 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
11672 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
11673 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
11674 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
11675 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
11679 #define X(OPC,I,F,S) N_MNEM_##OPC
11684 static const struct neon_tab_entry neon_enc_tab
[] =
11686 #define X(OPC,I,F,S) { (I), (F), (S) }
11691 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
11692 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11693 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11694 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11695 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11696 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11697 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11698 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11699 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11700 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11701 #define NEON_ENC_SINGLE_(X) \
11702 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
11703 #define NEON_ENC_DOUBLE_(X) \
11704 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
11706 #define NEON_ENCODE(type, inst) \
11709 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
11710 inst.is_neon = 1; \
11714 #define check_neon_suffixes \
11717 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
11719 as_bad (_("invalid neon suffix for non neon instruction")); \
11725 /* Define shapes for instruction operands. The following mnemonic characters
11726 are used in this table:
11728 F - VFP S<n> register
11729 D - Neon D<n> register
11730 Q - Neon Q<n> register
11734 L - D<n> register list
11736 This table is used to generate various data:
11737 - enumerations of the form NS_DDR to be used as arguments to
11739 - a table classifying shapes into single, double, quad, mixed.
11740 - a table used to drive neon_select_shape. */
11742 #define NEON_SHAPE_DEF \
11743 X(3, (D, D, D), DOUBLE), \
11744 X(3, (Q, Q, Q), QUAD), \
11745 X(3, (D, D, I), DOUBLE), \
11746 X(3, (Q, Q, I), QUAD), \
11747 X(3, (D, D, S), DOUBLE), \
11748 X(3, (Q, Q, S), QUAD), \
11749 X(2, (D, D), DOUBLE), \
11750 X(2, (Q, Q), QUAD), \
11751 X(2, (D, S), DOUBLE), \
11752 X(2, (Q, S), QUAD), \
11753 X(2, (D, R), DOUBLE), \
11754 X(2, (Q, R), QUAD), \
11755 X(2, (D, I), DOUBLE), \
11756 X(2, (Q, I), QUAD), \
11757 X(3, (D, L, D), DOUBLE), \
11758 X(2, (D, Q), MIXED), \
11759 X(2, (Q, D), MIXED), \
11760 X(3, (D, Q, I), MIXED), \
11761 X(3, (Q, D, I), MIXED), \
11762 X(3, (Q, D, D), MIXED), \
11763 X(3, (D, Q, Q), MIXED), \
11764 X(3, (Q, Q, D), MIXED), \
11765 X(3, (Q, D, S), MIXED), \
11766 X(3, (D, Q, S), MIXED), \
11767 X(4, (D, D, D, I), DOUBLE), \
11768 X(4, (Q, Q, Q, I), QUAD), \
11769 X(2, (F, F), SINGLE), \
11770 X(3, (F, F, F), SINGLE), \
11771 X(2, (F, I), SINGLE), \
11772 X(2, (F, D), MIXED), \
11773 X(2, (D, F), MIXED), \
11774 X(3, (F, F, I), MIXED), \
11775 X(4, (R, R, F, F), SINGLE), \
11776 X(4, (F, F, R, R), SINGLE), \
11777 X(3, (D, R, R), DOUBLE), \
11778 X(3, (R, R, D), DOUBLE), \
11779 X(2, (S, R), SINGLE), \
11780 X(2, (R, S), SINGLE), \
11781 X(2, (F, R), SINGLE), \
11782 X(2, (R, F), SINGLE)
11784 #define S2(A,B) NS_##A##B
11785 #define S3(A,B,C) NS_##A##B##C
11786 #define S4(A,B,C,D) NS_##A##B##C##D
11788 #define X(N, L, C) S##N L
11801 enum neon_shape_class
11809 #define X(N, L, C) SC_##C
11811 static enum neon_shape_class neon_shape_class
[] =
11829 /* Register widths of above. */
11830 static unsigned neon_shape_el_size
[] =
11841 struct neon_shape_info
11844 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
11847 #define S2(A,B) { SE_##A, SE_##B }
11848 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
11849 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
11851 #define X(N, L, C) { N, S##N L }
11853 static struct neon_shape_info neon_shape_tab
[] =
11863 /* Bit masks used in type checking given instructions.
11864 'N_EQK' means the type must be the same as (or based on in some way) the key
11865 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
11866 set, various other bits can be set as well in order to modify the meaning of
11867 the type constraint. */
11869 enum neon_type_mask
11892 N_KEY
= 0x1000000, /* Key element (main type specifier). */
11893 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
11894 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
11895 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
11896 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
11897 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
11898 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
11899 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
11900 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
11901 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
11903 N_MAX_NONSPECIAL
= N_F64
11906 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
11908 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
11909 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
11910 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
11911 #define N_SUF_32 (N_SU_32 | N_F32)
11912 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
11913 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
11915 /* Pass this as the first type argument to neon_check_type to ignore types
11917 #define N_IGNORE_TYPE (N_KEY | N_EQK)
11919 /* Select a "shape" for the current instruction (describing register types or
11920 sizes) from a list of alternatives. Return NS_NULL if the current instruction
11921 doesn't fit. For non-polymorphic shapes, checking is usually done as a
11922 function of operand parsing, so this function doesn't need to be called.
11923 Shapes should be listed in order of decreasing length. */
11925 static enum neon_shape
11926 neon_select_shape (enum neon_shape shape
, ...)
11929 enum neon_shape first_shape
= shape
;
11931 /* Fix missing optional operands. FIXME: we don't know at this point how
11932 many arguments we should have, so this makes the assumption that we have
11933 > 1. This is true of all current Neon opcodes, I think, but may not be
11934 true in the future. */
11935 if (!inst
.operands
[1].present
)
11936 inst
.operands
[1] = inst
.operands
[0];
11938 va_start (ap
, shape
);
11940 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
11945 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
11947 if (!inst
.operands
[j
].present
)
11953 switch (neon_shape_tab
[shape
].el
[j
])
11956 if (!(inst
.operands
[j
].isreg
11957 && inst
.operands
[j
].isvec
11958 && inst
.operands
[j
].issingle
11959 && !inst
.operands
[j
].isquad
))
11964 if (!(inst
.operands
[j
].isreg
11965 && inst
.operands
[j
].isvec
11966 && !inst
.operands
[j
].isquad
11967 && !inst
.operands
[j
].issingle
))
11972 if (!(inst
.operands
[j
].isreg
11973 && !inst
.operands
[j
].isvec
))
11978 if (!(inst
.operands
[j
].isreg
11979 && inst
.operands
[j
].isvec
11980 && inst
.operands
[j
].isquad
11981 && !inst
.operands
[j
].issingle
))
11986 if (!(!inst
.operands
[j
].isreg
11987 && !inst
.operands
[j
].isscalar
))
11992 if (!(!inst
.operands
[j
].isreg
11993 && inst
.operands
[j
].isscalar
))
12009 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
12010 first_error (_("invalid instruction shape"));
12015 /* True if SHAPE is predominantly a quadword operation (most of the time, this
12016 means the Q bit should be set). */
12019 neon_quad (enum neon_shape shape
)
12021 return neon_shape_class
[shape
] == SC_QUAD
;
12025 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
12028 /* Allow modification to be made to types which are constrained to be
12029 based on the key element, based on bits set alongside N_EQK. */
12030 if ((typebits
& N_EQK
) != 0)
12032 if ((typebits
& N_HLF
) != 0)
12034 else if ((typebits
& N_DBL
) != 0)
12036 if ((typebits
& N_SGN
) != 0)
12037 *g_type
= NT_signed
;
12038 else if ((typebits
& N_UNS
) != 0)
12039 *g_type
= NT_unsigned
;
12040 else if ((typebits
& N_INT
) != 0)
12041 *g_type
= NT_integer
;
12042 else if ((typebits
& N_FLT
) != 0)
12043 *g_type
= NT_float
;
12044 else if ((typebits
& N_SIZ
) != 0)
12045 *g_type
= NT_untyped
;
12049 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
12050 operand type, i.e. the single type specified in a Neon instruction when it
12051 is the only one given. */
12053 static struct neon_type_el
12054 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
12056 struct neon_type_el dest
= *key
;
12058 gas_assert ((thisarg
& N_EQK
) != 0);
12060 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
12065 /* Convert Neon type and size into compact bitmask representation. */
12067 static enum neon_type_mask
12068 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
12075 case 8: return N_8
;
12076 case 16: return N_16
;
12077 case 32: return N_32
;
12078 case 64: return N_64
;
12086 case 8: return N_I8
;
12087 case 16: return N_I16
;
12088 case 32: return N_I32
;
12089 case 64: return N_I64
;
12097 case 16: return N_F16
;
12098 case 32: return N_F32
;
12099 case 64: return N_F64
;
12107 case 8: return N_P8
;
12108 case 16: return N_P16
;
12116 case 8: return N_S8
;
12117 case 16: return N_S16
;
12118 case 32: return N_S32
;
12119 case 64: return N_S64
;
12127 case 8: return N_U8
;
12128 case 16: return N_U16
;
12129 case 32: return N_U32
;
12130 case 64: return N_U64
;
12141 /* Convert compact Neon bitmask type representation to a type and size. Only
12142 handles the case where a single bit is set in the mask. */
12145 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
12146 enum neon_type_mask mask
)
12148 if ((mask
& N_EQK
) != 0)
12151 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
12153 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
12155 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
12157 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
)) != 0)
12162 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
12164 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
12165 *type
= NT_unsigned
;
12166 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
12167 *type
= NT_integer
;
12168 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
12169 *type
= NT_untyped
;
12170 else if ((mask
& (N_P8
| N_P16
)) != 0)
12172 else if ((mask
& (N_F32
| N_F64
)) != 0)
12180 /* Modify a bitmask of allowed types. This is only needed for type
12184 modify_types_allowed (unsigned allowed
, unsigned mods
)
12187 enum neon_el_type type
;
12193 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
12195 if (el_type_of_type_chk (&type
, &size
,
12196 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
12198 neon_modify_type_size (mods
, &type
, &size
);
12199 destmask
|= type_chk_of_el_type (type
, size
);
12206 /* Check type and return type classification.
12207 The manual states (paraphrase): If one datatype is given, it indicates the
12209 - the second operand, if there is one
12210 - the operand, if there is no second operand
12211 - the result, if there are no operands.
12212 This isn't quite good enough though, so we use a concept of a "key" datatype
12213 which is set on a per-instruction basis, which is the one which matters when
12214 only one data type is written.
12215 Note: this function has side-effects (e.g. filling in missing operands). All
12216 Neon instructions should call it before performing bit encoding. */
12218 static struct neon_type_el
12219 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
12222 unsigned i
, pass
, key_el
= 0;
12223 unsigned types
[NEON_MAX_TYPE_ELS
];
12224 enum neon_el_type k_type
= NT_invtype
;
12225 unsigned k_size
= -1u;
12226 struct neon_type_el badtype
= {NT_invtype
, -1};
12227 unsigned key_allowed
= 0;
12229 /* Optional registers in Neon instructions are always (not) in operand 1.
12230 Fill in the missing operand here, if it was omitted. */
12231 if (els
> 1 && !inst
.operands
[1].present
)
12232 inst
.operands
[1] = inst
.operands
[0];
12234 /* Suck up all the varargs. */
12236 for (i
= 0; i
< els
; i
++)
12238 unsigned thisarg
= va_arg (ap
, unsigned);
12239 if (thisarg
== N_IGNORE_TYPE
)
12244 types
[i
] = thisarg
;
12245 if ((thisarg
& N_KEY
) != 0)
12250 if (inst
.vectype
.elems
> 0)
12251 for (i
= 0; i
< els
; i
++)
12252 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
12254 first_error (_("types specified in both the mnemonic and operands"));
12258 /* Duplicate inst.vectype elements here as necessary.
12259 FIXME: No idea if this is exactly the same as the ARM assembler,
12260 particularly when an insn takes one register and one non-register
12262 if (inst
.vectype
.elems
== 1 && els
> 1)
12265 inst
.vectype
.elems
= els
;
12266 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
12267 for (j
= 0; j
< els
; j
++)
12269 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
12272 else if (inst
.vectype
.elems
== 0 && els
> 0)
12275 /* No types were given after the mnemonic, so look for types specified
12276 after each operand. We allow some flexibility here; as long as the
12277 "key" operand has a type, we can infer the others. */
12278 for (j
= 0; j
< els
; j
++)
12279 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
12280 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
12282 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
12284 for (j
= 0; j
< els
; j
++)
12285 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
12286 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
12291 first_error (_("operand types can't be inferred"));
12295 else if (inst
.vectype
.elems
!= els
)
12297 first_error (_("type specifier has the wrong number of parts"));
12301 for (pass
= 0; pass
< 2; pass
++)
12303 for (i
= 0; i
< els
; i
++)
12305 unsigned thisarg
= types
[i
];
12306 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
12307 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
12308 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
12309 unsigned g_size
= inst
.vectype
.el
[i
].size
;
12311 /* Decay more-specific signed & unsigned types to sign-insensitive
12312 integer types if sign-specific variants are unavailable. */
12313 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
12314 && (types_allowed
& N_SU_ALL
) == 0)
12315 g_type
= NT_integer
;
12317 /* If only untyped args are allowed, decay any more specific types to
12318 them. Some instructions only care about signs for some element
12319 sizes, so handle that properly. */
12320 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
12321 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
12322 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
12323 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
12324 g_type
= NT_untyped
;
12328 if ((thisarg
& N_KEY
) != 0)
12332 key_allowed
= thisarg
& ~N_KEY
;
12337 if ((thisarg
& N_VFP
) != 0)
12339 enum neon_shape_el regshape
;
12340 unsigned regwidth
, match
;
12342 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
12345 first_error (_("invalid instruction shape"));
12348 regshape
= neon_shape_tab
[ns
].el
[i
];
12349 regwidth
= neon_shape_el_size
[regshape
];
12351 /* In VFP mode, operands must match register widths. If we
12352 have a key operand, use its width, else use the width of
12353 the current operand. */
12359 if (regwidth
!= match
)
12361 first_error (_("operand size must match register width"));
12366 if ((thisarg
& N_EQK
) == 0)
12368 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
12370 if ((given_type
& types_allowed
) == 0)
12372 first_error (_("bad type in Neon instruction"));
12378 enum neon_el_type mod_k_type
= k_type
;
12379 unsigned mod_k_size
= k_size
;
12380 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
12381 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
12383 first_error (_("inconsistent types in Neon instruction"));
12391 return inst
.vectype
.el
[key_el
];
12394 /* Neon-style VFP instruction forwarding. */
12396 /* Thumb VFP instructions have 0xE in the condition field. */
12399 do_vfp_cond_or_thumb (void)
12404 inst
.instruction
|= 0xe0000000;
12406 inst
.instruction
|= inst
.cond
<< 28;
12409 /* Look up and encode a simple mnemonic, for use as a helper function for the
12410 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
12411 etc. It is assumed that operand parsing has already been done, and that the
12412 operands are in the form expected by the given opcode (this isn't necessarily
12413 the same as the form in which they were parsed, hence some massaging must
12414 take place before this function is called).
12415 Checks current arch version against that in the looked-up opcode. */
12418 do_vfp_nsyn_opcode (const char *opname
)
12420 const struct asm_opcode
*opcode
;
12422 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
12427 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
12428 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
12435 inst
.instruction
= opcode
->tvalue
;
12436 opcode
->tencode ();
12440 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
12441 opcode
->aencode ();
12446 do_vfp_nsyn_add_sub (enum neon_shape rs
)
12448 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
12453 do_vfp_nsyn_opcode ("fadds");
12455 do_vfp_nsyn_opcode ("fsubs");
12460 do_vfp_nsyn_opcode ("faddd");
12462 do_vfp_nsyn_opcode ("fsubd");
12466 /* Check operand types to see if this is a VFP instruction, and if so call
12470 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
12472 enum neon_shape rs
;
12473 struct neon_type_el et
;
12478 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12479 et
= neon_check_type (2, rs
,
12480 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12484 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12485 et
= neon_check_type (3, rs
,
12486 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12493 if (et
.type
!= NT_invtype
)
12504 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
12506 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
12511 do_vfp_nsyn_opcode ("fmacs");
12513 do_vfp_nsyn_opcode ("fnmacs");
12518 do_vfp_nsyn_opcode ("fmacd");
12520 do_vfp_nsyn_opcode ("fnmacd");
12525 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
12527 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
12532 do_vfp_nsyn_opcode ("ffmas");
12534 do_vfp_nsyn_opcode ("ffnmas");
12539 do_vfp_nsyn_opcode ("ffmad");
12541 do_vfp_nsyn_opcode ("ffnmad");
12546 do_vfp_nsyn_mul (enum neon_shape rs
)
12549 do_vfp_nsyn_opcode ("fmuls");
12551 do_vfp_nsyn_opcode ("fmuld");
12555 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
12557 int is_neg
= (inst
.instruction
& 0x80) != 0;
12558 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
12563 do_vfp_nsyn_opcode ("fnegs");
12565 do_vfp_nsyn_opcode ("fabss");
12570 do_vfp_nsyn_opcode ("fnegd");
12572 do_vfp_nsyn_opcode ("fabsd");
12576 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
12577 insns belong to Neon, and are handled elsewhere. */
12580 do_vfp_nsyn_ldm_stm (int is_dbmode
)
12582 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
12586 do_vfp_nsyn_opcode ("fldmdbs");
12588 do_vfp_nsyn_opcode ("fldmias");
12593 do_vfp_nsyn_opcode ("fstmdbs");
12595 do_vfp_nsyn_opcode ("fstmias");
12600 do_vfp_nsyn_sqrt (void)
12602 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12603 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12606 do_vfp_nsyn_opcode ("fsqrts");
12608 do_vfp_nsyn_opcode ("fsqrtd");
12612 do_vfp_nsyn_div (void)
12614 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12615 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
12616 N_F32
| N_F64
| N_KEY
| N_VFP
);
12619 do_vfp_nsyn_opcode ("fdivs");
12621 do_vfp_nsyn_opcode ("fdivd");
12625 do_vfp_nsyn_nmul (void)
12627 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12628 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
12629 N_F32
| N_F64
| N_KEY
| N_VFP
);
12633 NEON_ENCODE (SINGLE
, inst
);
12634 do_vfp_sp_dyadic ();
12638 NEON_ENCODE (DOUBLE
, inst
);
12639 do_vfp_dp_rd_rn_rm ();
12641 do_vfp_cond_or_thumb ();
12645 do_vfp_nsyn_cmp (void)
12647 if (inst
.operands
[1].isreg
)
12649 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12650 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12654 NEON_ENCODE (SINGLE
, inst
);
12655 do_vfp_sp_monadic ();
12659 NEON_ENCODE (DOUBLE
, inst
);
12660 do_vfp_dp_rd_rm ();
12665 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
12666 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
12668 switch (inst
.instruction
& 0x0fffffff)
12671 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
12674 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
12682 NEON_ENCODE (SINGLE
, inst
);
12683 do_vfp_sp_compare_z ();
12687 NEON_ENCODE (DOUBLE
, inst
);
12691 do_vfp_cond_or_thumb ();
12695 nsyn_insert_sp (void)
12697 inst
.operands
[1] = inst
.operands
[0];
12698 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
12699 inst
.operands
[0].reg
= REG_SP
;
12700 inst
.operands
[0].isreg
= 1;
12701 inst
.operands
[0].writeback
= 1;
12702 inst
.operands
[0].present
= 1;
12706 do_vfp_nsyn_push (void)
12709 if (inst
.operands
[1].issingle
)
12710 do_vfp_nsyn_opcode ("fstmdbs");
12712 do_vfp_nsyn_opcode ("fstmdbd");
12716 do_vfp_nsyn_pop (void)
12719 if (inst
.operands
[1].issingle
)
12720 do_vfp_nsyn_opcode ("fldmias");
12722 do_vfp_nsyn_opcode ("fldmiad");
12725 /* Fix up Neon data-processing instructions, ORing in the correct bits for
12726 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
12729 neon_dp_fixup (struct arm_it
* insn
)
12731 unsigned int i
= insn
->instruction
;
12736 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
12747 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (i.e. 0, 1, 2, 3), as used in Neon size fields.  */

static int
neon_logbits (unsigned x)
{
  /* Same result as ffs (x) - 4: the 1-based index of the least
     significant set bit, biased down by four.  */
  int pos = 0;
  unsigned v = x;

  if (v != 0)
    {
      pos = 1;
      while ((v & 1u) == 0)
        {
          v >>= 1;
          pos++;
        }
    }
  return pos - 4;
}
12759 #define LOW4(R) ((R) & 0xf)
12760 #define HI1(R) (((R) >> 4) & 1)
12762 /* Encode insns with bit pattern:
12764 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12765 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
12767 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
12768 different meaning for some instruction. */
12771 neon_three_same (int isquad
, int ubit
, int size
)
12773 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12774 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12775 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12776 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12777 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12778 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12779 inst
.instruction
|= (isquad
!= 0) << 6;
12780 inst
.instruction
|= (ubit
!= 0) << 24;
12782 inst
.instruction
|= neon_logbits (size
) << 20;
12784 neon_dp_fixup (&inst
);
12787 /* Encode instructions of the form:
12789 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
12790 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
12792 Don't write size if SIZE == -1. */
12795 neon_two_same (int qbit
, int ubit
, int size
)
12797 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12798 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12799 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12800 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12801 inst
.instruction
|= (qbit
!= 0) << 6;
12802 inst
.instruction
|= (ubit
!= 0) << 24;
12805 inst
.instruction
|= neon_logbits (size
) << 18;
12807 neon_dp_fixup (&inst
);
12810 /* Neon instruction encoders, in approximate order of appearance. */
12813 do_neon_dyadic_i_su (void)
12815 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12816 struct neon_type_el et
= neon_check_type (3, rs
,
12817 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
12818 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12822 do_neon_dyadic_i64_su (void)
12824 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12825 struct neon_type_el et
= neon_check_type (3, rs
,
12826 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
12827 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12831 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
12834 unsigned size
= et
.size
>> 3;
12835 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12836 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12837 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12838 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12839 inst
.instruction
|= (isquad
!= 0) << 6;
12840 inst
.instruction
|= immbits
<< 16;
12841 inst
.instruction
|= (size
>> 3) << 7;
12842 inst
.instruction
|= (size
& 0x7) << 19;
12844 inst
.instruction
|= (uval
!= 0) << 24;
12846 neon_dp_fixup (&inst
);
12850 do_neon_shl_imm (void)
12852 if (!inst
.operands
[2].isreg
)
12854 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12855 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
12856 NEON_ENCODE (IMMED
, inst
);
12857 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, inst
.operands
[2].imm
);
12861 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12862 struct neon_type_el et
= neon_check_type (3, rs
,
12863 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
12866 /* VSHL/VQSHL 3-register variants have syntax such as:
12868 whereas other 3-register operations encoded by neon_three_same have
12871 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
12873 tmp
= inst
.operands
[2].reg
;
12874 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
12875 inst
.operands
[1].reg
= tmp
;
12876 NEON_ENCODE (INTEGER
, inst
);
12877 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12882 do_neon_qshl_imm (void)
12884 if (!inst
.operands
[2].isreg
)
12886 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12887 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
12889 NEON_ENCODE (IMMED
, inst
);
12890 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
12891 inst
.operands
[2].imm
);
12895 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12896 struct neon_type_el et
= neon_check_type (3, rs
,
12897 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
12900 /* See note in do_neon_shl_imm. */
12901 tmp
= inst
.operands
[2].reg
;
12902 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
12903 inst
.operands
[1].reg
= tmp
;
12904 NEON_ENCODE (INTEGER
, inst
);
12905 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12910 do_neon_rshl (void)
12912 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12913 struct neon_type_el et
= neon_check_type (3, rs
,
12914 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
12917 tmp
= inst
.operands
[2].reg
;
12918 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
12919 inst
.operands
[1].reg
= tmp
;
12920 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12924 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
12926 /* Handle .I8 pseudo-instructions. */
12929 /* Unfortunately, this will make everything apart from zero out-of-range.
12930 FIXME is this the intended semantics? There doesn't seem much point in
12931 accepting .I8 if so. */
12932 immediate
|= immediate
<< 8;
12938 if (immediate
== (immediate
& 0x000000ff))
12940 *immbits
= immediate
;
12943 else if (immediate
== (immediate
& 0x0000ff00))
12945 *immbits
= immediate
>> 8;
12948 else if (immediate
== (immediate
& 0x00ff0000))
12950 *immbits
= immediate
>> 16;
12953 else if (immediate
== (immediate
& 0xff000000))
12955 *immbits
= immediate
>> 24;
12958 if ((immediate
& 0xffff) != (immediate
>> 16))
12959 goto bad_immediate
;
12960 immediate
&= 0xffff;
12963 if (immediate
== (immediate
& 0x000000ff))
12965 *immbits
= immediate
;
12968 else if (immediate
== (immediate
& 0x0000ff00))
12970 *immbits
= immediate
>> 8;
12975 first_error (_("immediate value out of range"));
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D: every byte is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      if (field != 0 && field != mask)
        return 0;
    }
  return 1;
}
/* For immediate of above form, return 0bABCD: collect bit 0 of each of
   the four bytes into the low nibble of the result.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01)
         | ((imm >> 7) & 0x02)
         | ((imm >> 14) & 0x04)
         | ((imm >> 21) & 0x08);
}
/* Compress quarter-float representation to 0b...000 abcdefgh.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;  /* bits b..h of the encoding.  */
  unsigned sign = (imm >> 24) & 0x80;  /* bit a (the sign).  */

  return low7 | sign;
}
13008 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
13009 the instruction. *OP is passed as the initial value of the op field, and
13010 may be set to a different value depending on the constant (i.e.
13011 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
13012 MVN). If the immediate looks like a repeated pattern then also
13013 try smaller element sizes. */
13016 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
13017 unsigned *immbits
, int *op
, int size
,
13018 enum neon_el_type type
)
13020 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
13022 if (type
== NT_float
&& !float_p
)
13025 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
13027 if (size
!= 32 || *op
== 1)
13029 *immbits
= neon_qfloat_bits (immlo
);
13035 if (neon_bits_same_in_bytes (immhi
)
13036 && neon_bits_same_in_bytes (immlo
))
13040 *immbits
= (neon_squash_bits (immhi
) << 4)
13041 | neon_squash_bits (immlo
);
13046 if (immhi
!= immlo
)
13052 if (immlo
== (immlo
& 0x000000ff))
13057 else if (immlo
== (immlo
& 0x0000ff00))
13059 *immbits
= immlo
>> 8;
13062 else if (immlo
== (immlo
& 0x00ff0000))
13064 *immbits
= immlo
>> 16;
13067 else if (immlo
== (immlo
& 0xff000000))
13069 *immbits
= immlo
>> 24;
13072 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
13074 *immbits
= (immlo
>> 8) & 0xff;
13077 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
13079 *immbits
= (immlo
>> 16) & 0xff;
13083 if ((immlo
& 0xffff) != (immlo
>> 16))
13090 if (immlo
== (immlo
& 0x000000ff))
13095 else if (immlo
== (immlo
& 0x0000ff00))
13097 *immbits
= immlo
>> 8;
13101 if ((immlo
& 0xff) != (immlo
>> 8))
13106 if (immlo
== (immlo
& 0x000000ff))
13108 /* Don't allow MVN with 8-bit immediate. */
13118 /* Write immediate bits [7:0] to the following locations:
13120 |28/24|23 19|18 16|15 4|3 0|
13121 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
13123 This function is used by VMOV/VMVN/VORR/VBIC. */
13126 neon_write_immbits (unsigned immbits
)
13128 inst
.instruction
|= immbits
& 0xf;
13129 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
13130 inst
.instruction
|= ((immbits
>> 7) & 0x1) << 24;
13133 /* Invert low-order SIZE bits of XHI:XLO. */
13136 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
13138 unsigned immlo
= xlo
? *xlo
: 0;
13139 unsigned immhi
= xhi
? *xhi
: 0;
13144 immlo
= (~immlo
) & 0xff;
13148 immlo
= (~immlo
) & 0xffff;
13152 immhi
= (~immhi
) & 0xffffffff;
13153 /* fall through. */
13156 immlo
= (~immlo
) & 0xffffffff;
13171 do_neon_logic (void)
13173 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
13175 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13176 neon_check_type (3, rs
, N_IGNORE_TYPE
);
13177 /* U bit and size field were set as part of the bitmask. */
13178 NEON_ENCODE (INTEGER
, inst
);
13179 neon_three_same (neon_quad (rs
), 0, -1);
13183 const int three_ops_form
= (inst
.operands
[2].present
13184 && !inst
.operands
[2].isreg
);
13185 const int immoperand
= (three_ops_form
? 2 : 1);
13186 enum neon_shape rs
= (three_ops_form
13187 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
13188 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
13189 struct neon_type_el et
= neon_check_type (2, rs
,
13190 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
13191 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
13195 if (et
.type
== NT_invtype
)
13198 if (three_ops_form
)
13199 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13200 _("first and second operands shall be the same register"));
13202 NEON_ENCODE (IMMED
, inst
);
13204 immbits
= inst
.operands
[immoperand
].imm
;
13207 /* .i64 is a pseudo-op, so the immediate must be a repeating
13209 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
13210 inst
.operands
[immoperand
].reg
: 0))
13212 /* Set immbits to an invalid constant. */
13213 immbits
= 0xdeadbeef;
13220 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
13224 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
13228 /* Pseudo-instruction for VBIC. */
13229 neon_invert_size (&immbits
, 0, et
.size
);
13230 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
13234 /* Pseudo-instruction for VORR. */
13235 neon_invert_size (&immbits
, 0, et
.size
);
13236 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
13246 inst
.instruction
|= neon_quad (rs
) << 6;
13247 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13248 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13249 inst
.instruction
|= cmode
<< 8;
13250 neon_write_immbits (immbits
);
13252 neon_dp_fixup (&inst
);
13257 do_neon_bitfield (void)
13259 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13260 neon_check_type (3, rs
, N_IGNORE_TYPE
);
13261 neon_three_same (neon_quad (rs
), 0, -1);
13265 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
13268 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13269 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
13271 if (et
.type
== NT_float
)
13273 NEON_ENCODE (FLOAT
, inst
);
13274 neon_three_same (neon_quad (rs
), 0, -1);
13278 NEON_ENCODE (INTEGER
, inst
);
13279 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
13284 do_neon_dyadic_if_su (void)
13286 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
13290 do_neon_dyadic_if_su_d (void)
13292 /* This version only allow D registers, but that constraint is enforced during
13293 operand parsing so we don't need to do anything extra here. */
13294 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
13298 do_neon_dyadic_if_i_d (void)
13300 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13301 affected if we specify unsigned args. */
13302 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
13305 enum vfp_or_neon_is_neon_bits
13308 NEON_CHECK_ARCH
= 2
13311 /* Call this function if an instruction which may have belonged to the VFP or
13312 Neon instruction sets, but turned out to be a Neon instruction (due to the
13313 operand types involved, etc.). We have to check and/or fix-up a couple of
13316 - Make sure the user hasn't attempted to make a Neon instruction
13318 - Alter the value in the condition code field if necessary.
13319 - Make sure that the arch supports Neon instructions.
13321 Which of these operations take place depends on bits from enum
13322 vfp_or_neon_is_neon_bits.
13324 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13325 current instruction's condition is COND_ALWAYS, the condition field is
13326 changed to inst.uncond_value. This is necessary because instructions shared
13327 between VFP and Neon may be conditional for the VFP variants only, and the
13328 unconditional Neon version must have, e.g., 0xF in the condition field. */
13331 vfp_or_neon_is_neon (unsigned check
)
13333 /* Conditions are always legal in Thumb mode (IT blocks). */
13334 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
13336 if (inst
.cond
!= COND_ALWAYS
)
13338 first_error (_(BAD_COND
));
13341 if (inst
.uncond_value
!= -1)
13342 inst
.instruction
|= inst
.uncond_value
<< 28;
13345 if ((check
& NEON_CHECK_ARCH
)
13346 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
13348 first_error (_(BAD_FPU
));
13356 do_neon_addsub_if_i (void)
13358 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
13361 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13364 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13365 affected if we specify unsigned args. */
13366 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
13369 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
13371 V<op> A,B (A is operand 0, B is operand 2)
13376 so handle that case specially. */
13379 neon_exchange_operands (void)
13381 void *scratch
= alloca (sizeof (inst
.operands
[0]));
13382 if (inst
.operands
[1].present
)
13384 /* Swap operands[1] and operands[2]. */
13385 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
13386 inst
.operands
[1] = inst
.operands
[2];
13387 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
13391 inst
.operands
[1] = inst
.operands
[2];
13392 inst
.operands
[2] = inst
.operands
[0];
13397 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
13399 if (inst
.operands
[2].isreg
)
13402 neon_exchange_operands ();
13403 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
13407 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13408 struct neon_type_el et
= neon_check_type (2, rs
,
13409 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
13411 NEON_ENCODE (IMMED
, inst
);
13412 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13413 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13414 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13415 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13416 inst
.instruction
|= neon_quad (rs
) << 6;
13417 inst
.instruction
|= (et
.type
== NT_float
) << 10;
13418 inst
.instruction
|= neon_logbits (et
.size
) << 18;
13420 neon_dp_fixup (&inst
);
13427 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
13431 do_neon_cmp_inv (void)
13433 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
13439 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
13442 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
13443 scalars, which are encoded in 5 bits, M : Rm.
13444 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13445 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13449 neon_scalar_for_mul (unsigned scalar
, unsigned elsize
)
13451 unsigned regno
= NEON_SCALAR_REG (scalar
);
13452 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
13457 if (regno
> 7 || elno
> 3)
13459 return regno
| (elno
<< 3);
13462 if (regno
> 15 || elno
> 1)
13464 return regno
| (elno
<< 4);
13468 first_error (_("scalar out of range for multiply instruction"));
13474 /* Encode multiply / multiply-accumulate scalar instructions. */
13477 neon_mul_mac (struct neon_type_el et
, int ubit
)
13481 /* Give a more helpful error message if we have an invalid type. */
13482 if (et
.type
== NT_invtype
)
13485 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
13486 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13487 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13488 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
13489 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
13490 inst
.instruction
|= LOW4 (scalar
);
13491 inst
.instruction
|= HI1 (scalar
) << 5;
13492 inst
.instruction
|= (et
.type
== NT_float
) << 8;
13493 inst
.instruction
|= neon_logbits (et
.size
) << 20;
13494 inst
.instruction
|= (ubit
!= 0) << 24;
13496 neon_dp_fixup (&inst
);
13500 do_neon_mac_maybe_scalar (void)
13502 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
13505 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13508 if (inst
.operands
[2].isscalar
)
13510 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
13511 struct neon_type_el et
= neon_check_type (3, rs
,
13512 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
13513 NEON_ENCODE (SCALAR
, inst
);
13514 neon_mul_mac (et
, neon_quad (rs
));
13518 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13519 affected if we specify unsigned args. */
13520 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
13525 do_neon_fmac (void)
13527 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
13530 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13533 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
13539 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13540 struct neon_type_el et
= neon_check_type (3, rs
,
13541 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13542 neon_three_same (neon_quad (rs
), 0, et
.size
);
13545 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
13546 same types as the MAC equivalents. The polynomial type for this instruction
13547 is encoded the same as the integer type. */
13552 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
13555 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13558 if (inst
.operands
[2].isscalar
)
13559 do_neon_mac_maybe_scalar ();
13561 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
13565 do_neon_qdmulh (void)
13567 if (inst
.operands
[2].isscalar
)
13569 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
13570 struct neon_type_el et
= neon_check_type (3, rs
,
13571 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
13572 NEON_ENCODE (SCALAR
, inst
);
13573 neon_mul_mac (et
, neon_quad (rs
));
13577 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13578 struct neon_type_el et
= neon_check_type (3, rs
,
13579 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
13580 NEON_ENCODE (INTEGER
, inst
);
13581 /* The U bit (rounding) comes from bit mask. */
13582 neon_three_same (neon_quad (rs
), 0, et
.size
);
13587 do_neon_fcmp_absolute (void)
13589 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13590 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
13591 /* Size field comes from bit mask. */
13592 neon_three_same (neon_quad (rs
), 1, -1);
/* Inverted absolute float comparison: swap the source operands, then
   encode as the non-inverted form.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
13603 do_neon_step (void)
13605 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13606 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
13607 neon_three_same (neon_quad (rs
), 0, -1);
13611 do_neon_abs_neg (void)
13613 enum neon_shape rs
;
13614 struct neon_type_el et
;
13616 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
13619 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13622 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13623 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
13625 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13626 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13627 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13628 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13629 inst
.instruction
|= neon_quad (rs
) << 6;
13630 inst
.instruction
|= (et
.type
== NT_float
) << 10;
13631 inst
.instruction
|= neon_logbits (et
.size
) << 18;
13633 neon_dp_fixup (&inst
);
13639 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13640 struct neon_type_el et
= neon_check_type (2, rs
,
13641 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
13642 int imm
= inst
.operands
[2].imm
;
13643 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
13644 _("immediate out of range for insert"));
13645 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
13651 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13652 struct neon_type_el et
= neon_check_type (2, rs
,
13653 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
13654 int imm
= inst
.operands
[2].imm
;
13655 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13656 _("immediate out of range for insert"));
13657 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
13661 do_neon_qshlu_imm (void)
13663 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13664 struct neon_type_el et
= neon_check_type (2, rs
,
13665 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
13666 int imm
= inst
.operands
[2].imm
;
13667 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
13668 _("immediate out of range for shift"));
13669 /* Only encodes the 'U present' variant of the instruction.
13670 In this case, signed types have OP (bit 8) set to 0.
13671 Unsigned types have OP set to 1. */
13672 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
13673 /* The rest of the bits are the same as other immediate shifts. */
13674 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
13678 do_neon_qmovn (void)
13680 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13681 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
13682 /* Saturating move where operands can be signed or unsigned, and the
13683 destination has the same signedness. */
13684 NEON_ENCODE (INTEGER
, inst
);
13685 if (et
.type
== NT_unsigned
)
13686 inst
.instruction
|= 0xc0;
13688 inst
.instruction
|= 0x80;
13689 neon_two_same (0, 1, et
.size
/ 2);
13693 do_neon_qmovun (void)
13695 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13696 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
13697 /* Saturating move with unsigned results. Operands must be signed. */
13698 NEON_ENCODE (INTEGER
, inst
);
13699 neon_two_same (0, 1, et
.size
/ 2);
13703 do_neon_rshift_sat_narrow (void)
13705 /* FIXME: Types for narrowing. If operands are signed, results can be signed
13706 or unsigned. If operands are unsigned, results must also be unsigned. */
13707 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13708 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
13709 int imm
= inst
.operands
[2].imm
;
13710 /* This gets the bounds check, size encoding and immediate bits calculation
13714 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
13715 VQMOVN.I<size> <Dd>, <Qm>. */
13718 inst
.operands
[2].present
= 0;
13719 inst
.instruction
= N_MNEM_vqmovn
;
13724 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13725 _("immediate out of range"));
13726 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
13730 do_neon_rshift_sat_narrow_u (void)
13732 /* FIXME: Types for narrowing. If operands are signed, results can be signed
13733 or unsigned. If operands are unsigned, results must also be unsigned. */
13734 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13735 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
13736 int imm
= inst
.operands
[2].imm
;
13737 /* This gets the bounds check, size encoding and immediate bits calculation
13741 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
13742 VQMOVUN.I<size> <Dd>, <Qm>. */
13745 inst
.operands
[2].present
= 0;
13746 inst
.instruction
= N_MNEM_vqmovun
;
13751 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13752 _("immediate out of range"));
13753 /* FIXME: The manual is kind of unclear about what value U should have in
13754 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
13756 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
13760 do_neon_movn (void)
13762 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13763 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
13764 NEON_ENCODE (INTEGER
, inst
);
13765 neon_two_same (0, 1, et
.size
/ 2);
13769 do_neon_rshift_narrow (void)
13771 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13772 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
13773 int imm
= inst
.operands
[2].imm
;
13774 /* This gets the bounds check, size encoding and immediate bits calculation
13778 /* If immediate is zero then we are a pseudo-instruction for
13779 VMOVN.I<size> <Dd>, <Qm> */
13782 inst
.operands
[2].present
= 0;
13783 inst
.instruction
= N_MNEM_vmovn
;
13788 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13789 _("immediate out of range for narrowing operation"));
13790 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
13794 do_neon_shll (void)
13796 /* FIXME: Type checking when lengthening. */
13797 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
13798 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
13799 unsigned imm
= inst
.operands
[2].imm
;
13801 if (imm
== et
.size
)
13803 /* Maximum shift variant. */
13804 NEON_ENCODE (INTEGER
, inst
);
13805 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13806 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13807 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13808 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13809 inst
.instruction
|= neon_logbits (et
.size
) << 18;
13811 neon_dp_fixup (&inst
);
13815 /* A more-specific type check for non-max versions. */
13816 et
= neon_check_type (2, NS_QDI
,
13817 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
13818 NEON_ENCODE (IMMED
, inst
);
13819 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
13823 /* Check the various types for the VCVT instruction, and return which version
13824 the current instruction is. */
13827 neon_cvt_flavour (enum neon_shape rs
)
13829 #define CVT_VAR(C,X,Y) \
13830 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
13831 if (et.type != NT_invtype) \
13833 inst.error = NULL; \
13836 struct neon_type_el et
;
13837 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
13838 || rs
== NS_FF
) ? N_VFP
: 0;
13839 /* The instruction versions which take an immediate take one register
13840 argument, which is extended to the width of the full register. Thus the
13841 "source" and "destination" registers must have the same width. Hack that
13842 here by making the size equal to the key (wider, in this case) operand. */
13843 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
13845 CVT_VAR (0, N_S32
, N_F32
);
13846 CVT_VAR (1, N_U32
, N_F32
);
13847 CVT_VAR (2, N_F32
, N_S32
);
13848 CVT_VAR (3, N_F32
, N_U32
);
13849 /* Half-precision conversions. */
13850 CVT_VAR (4, N_F32
, N_F16
);
13851 CVT_VAR (5, N_F16
, N_F32
);
13855 /* VFP instructions. */
13856 CVT_VAR (6, N_F32
, N_F64
);
13857 CVT_VAR (7, N_F64
, N_F32
);
13858 CVT_VAR (8, N_S32
, N_F64
| key
);
13859 CVT_VAR (9, N_U32
, N_F64
| key
);
13860 CVT_VAR (10, N_F64
| key
, N_S32
);
13861 CVT_VAR (11, N_F64
| key
, N_U32
);
13862 /* VFP instructions with bitshift. */
13863 CVT_VAR (12, N_F32
| key
, N_S16
);
13864 CVT_VAR (13, N_F32
| key
, N_U16
);
13865 CVT_VAR (14, N_F64
| key
, N_S16
);
13866 CVT_VAR (15, N_F64
| key
, N_U16
);
13867 CVT_VAR (16, N_S16
, N_F32
| key
);
13868 CVT_VAR (17, N_U16
, N_F32
| key
);
13869 CVT_VAR (18, N_S16
, N_F64
| key
);
13870 CVT_VAR (19, N_U16
, N_F64
| key
);
13876 /* Neon-syntax VFP conversions. */
13879 do_vfp_nsyn_cvt (enum neon_shape rs
, int flavour
)
13881 const char *opname
= 0;
13883 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
13885 /* Conversions with immediate bitshift. */
13886 const char *enc
[] =
13910 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
13912 opname
= enc
[flavour
];
13913 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13914 _("operands 0 and 1 must be the same register"));
13915 inst
.operands
[1] = inst
.operands
[2];
13916 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
13921 /* Conversions without bitshift. */
13922 const char *enc
[] =
13938 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
13939 opname
= enc
[flavour
];
13943 do_vfp_nsyn_opcode (opname
);
13947 do_vfp_nsyn_cvtz (void)
13949 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
13950 int flavour
= neon_cvt_flavour (rs
);
13951 const char *enc
[] =
13965 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
13966 do_vfp_nsyn_opcode (enc
[flavour
]);
13970 do_neon_cvt_1 (bfd_boolean round_to_zero ATTRIBUTE_UNUSED
)
13972 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
13973 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
, NS_NULL
);
13974 int flavour
= neon_cvt_flavour (rs
);
13976 /* PR11109: Handle round-to-zero for VCVT conversions. */
13978 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
13979 && (flavour
== 0 || flavour
== 1 || flavour
== 8 || flavour
== 9)
13980 && (rs
== NS_FD
|| rs
== NS_FF
))
13982 do_vfp_nsyn_cvtz ();
13986 /* VFP rather than Neon conversions. */
13989 do_vfp_nsyn_cvt (rs
, flavour
);
13999 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
14001 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14004 /* Fixed-point conversion with #0 immediate is encoded as an
14005 integer conversion. */
14006 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
14008 immbits
= 32 - inst
.operands
[2].imm
;
14009 NEON_ENCODE (IMMED
, inst
);
14011 inst
.instruction
|= enctab
[flavour
];
14012 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14013 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14014 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14015 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14016 inst
.instruction
|= neon_quad (rs
) << 6;
14017 inst
.instruction
|= 1 << 21;
14018 inst
.instruction
|= immbits
<< 16;
14020 neon_dp_fixup (&inst
);
14028 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
14030 NEON_ENCODE (INTEGER
, inst
);
14032 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14036 inst
.instruction
|= enctab
[flavour
];
14038 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14039 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14040 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14041 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14042 inst
.instruction
|= neon_quad (rs
) << 6;
14043 inst
.instruction
|= 2 << 18;
14045 neon_dp_fixup (&inst
);
14049 /* Half-precision conversions for Advanced SIMD -- neon. */
14054 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
14056 as_bad (_("operand size must match register width"));
14061 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
14063 as_bad (_("operand size must match register width"));
14068 inst
.instruction
= 0x3b60600;
14070 inst
.instruction
= 0x3b60700;
14072 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14073 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14074 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14075 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14076 neon_dp_fixup (&inst
);
14080 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
14081 do_vfp_nsyn_cvt (rs
, flavour
);
14086 do_neon_cvtr (void)
14088 do_neon_cvt_1 (FALSE
);
14094 do_neon_cvt_1 (TRUE
);
14098 do_neon_cvtb (void)
14100 inst
.instruction
= 0xeb20a40;
14102 /* The sizes are attached to the mnemonic. */
14103 if (inst
.vectype
.el
[0].type
!= NT_invtype
14104 && inst
.vectype
.el
[0].size
== 16)
14105 inst
.instruction
|= 0x00010000;
14107 /* Programmer's syntax: the sizes are attached to the operands. */
14108 else if (inst
.operands
[0].vectype
.type
!= NT_invtype
14109 && inst
.operands
[0].vectype
.size
== 16)
14110 inst
.instruction
|= 0x00010000;
14112 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
14113 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
14114 do_vfp_cond_or_thumb ();
14119 do_neon_cvtt (void)
14122 inst
.instruction
|= 0x80;
14126 neon_move_immediate (void)
14128 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
14129 struct neon_type_el et
= neon_check_type (2, rs
,
14130 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14131 unsigned immlo
, immhi
= 0, immbits
;
14132 int op
, cmode
, float_p
;
14134 constraint (et
.type
== NT_invtype
,
14135 _("operand size must be specified for immediate VMOV"));
14137 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
14138 op
= (inst
.instruction
& (1 << 5)) != 0;
14140 immlo
= inst
.operands
[1].imm
;
14141 if (inst
.operands
[1].regisimm
)
14142 immhi
= inst
.operands
[1].reg
;
14144 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
14145 _("immediate has bits set outside the operand size"));
14147 float_p
= inst
.operands
[1].immisfloat
;
14149 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
14150 et
.size
, et
.type
)) == FAIL
)
14152 /* Invert relevant bits only. */
14153 neon_invert_size (&immlo
, &immhi
, et
.size
);
14154 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
14155 with one or the other; those cases are caught by
14156 neon_cmode_for_move_imm. */
14158 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
14159 &op
, et
.size
, et
.type
)) == FAIL
)
14161 first_error (_("immediate out of range"));
14166 inst
.instruction
&= ~(1 << 5);
14167 inst
.instruction
|= op
<< 5;
14169 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14170 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14171 inst
.instruction
|= neon_quad (rs
) << 6;
14172 inst
.instruction
|= cmode
<< 8;
14174 neon_write_immbits (immbits
);
14180 if (inst
.operands
[1].isreg
)
14182 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14184 NEON_ENCODE (INTEGER
, inst
);
14185 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14186 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14187 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14188 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14189 inst
.instruction
|= neon_quad (rs
) << 6;
14193 NEON_ENCODE (IMMED
, inst
);
14194 neon_move_immediate ();
14197 neon_dp_fixup (&inst
);
14200 /* Encode instructions of form:
14202 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14203 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
14206 neon_mixed_length (struct neon_type_el et
, unsigned size
)
14208 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14209 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14210 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14211 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14212 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14213 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14214 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
14215 inst
.instruction
|= neon_logbits (size
) << 20;
14217 neon_dp_fixup (&inst
);
14221 do_neon_dyadic_long (void)
14223 /* FIXME: Type checking for lengthening op. */
14224 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14225 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
14226 neon_mixed_length (et
, et
.size
);
14230 do_neon_abal (void)
14232 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14233 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
14234 neon_mixed_length (et
, et
.size
);
14238 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
14240 if (inst
.operands
[2].isscalar
)
14242 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
14243 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
14244 NEON_ENCODE (SCALAR
, inst
);
14245 neon_mul_mac (et
, et
.type
== NT_unsigned
);
14249 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14250 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
14251 NEON_ENCODE (INTEGER
, inst
);
14252 neon_mixed_length (et
, et
.size
);
14257 do_neon_mac_maybe_scalar_long (void)
14259 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
14263 do_neon_dyadic_wide (void)
14265 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
14266 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
14267 neon_mixed_length (et
, et
.size
);
14271 do_neon_dyadic_narrow (void)
14273 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14274 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
14275 /* Operand sign is unimportant, and the U bit is part of the opcode,
14276 so force the operand type to integer. */
14277 et
.type
= NT_integer
;
14278 neon_mixed_length (et
, et
.size
/ 2);
14282 do_neon_mul_sat_scalar_long (void)
14284 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
14288 do_neon_vmull (void)
14290 if (inst
.operands
[2].isscalar
)
14291 do_neon_mac_maybe_scalar_long ();
14294 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14295 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_KEY
);
14296 if (et
.type
== NT_poly
)
14297 NEON_ENCODE (POLY
, inst
);
14299 NEON_ENCODE (INTEGER
, inst
);
14300 /* For polynomial encoding, size field must be 0b00 and the U bit must be
14301 zero. Should be OK as-is. */
14302 neon_mixed_length (et
, et
.size
);
14309 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
14310 struct neon_type_el et
= neon_check_type (3, rs
,
14311 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
14312 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
14314 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
14315 _("shift out of range"));
14316 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14317 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14318 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14319 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14320 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14321 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14322 inst
.instruction
|= neon_quad (rs
) << 6;
14323 inst
.instruction
|= imm
<< 8;
14325 neon_dp_fixup (&inst
);
14331 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14332 struct neon_type_el et
= neon_check_type (2, rs
,
14333 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14334 unsigned op
= (inst
.instruction
>> 7) & 3;
14335 /* N (width of reversed regions) is encoded as part of the bitmask. We
14336 extract it here to check the elements to be reversed are smaller.
14337 Otherwise we'd get a reserved instruction. */
14338 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
14339 gas_assert (elsize
!= 0);
14340 constraint (et
.size
>= elsize
,
14341 _("elements must be smaller than reversal region"));
14342 neon_two_same (neon_quad (rs
), 1, et
.size
);
14348 if (inst
.operands
[1].isscalar
)
14350 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
14351 struct neon_type_el et
= neon_check_type (2, rs
,
14352 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14353 unsigned sizebits
= et
.size
>> 3;
14354 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
14355 int logsize
= neon_logbits (et
.size
);
14356 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
14358 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
14361 NEON_ENCODE (SCALAR
, inst
);
14362 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14363 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14364 inst
.instruction
|= LOW4 (dm
);
14365 inst
.instruction
|= HI1 (dm
) << 5;
14366 inst
.instruction
|= neon_quad (rs
) << 6;
14367 inst
.instruction
|= x
<< 17;
14368 inst
.instruction
|= sizebits
<< 16;
14370 neon_dp_fixup (&inst
);
14374 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
14375 struct neon_type_el et
= neon_check_type (2, rs
,
14376 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
14377 /* Duplicate ARM register to lanes of vector. */
14378 NEON_ENCODE (ARMREG
, inst
);
14381 case 8: inst
.instruction
|= 0x400000; break;
14382 case 16: inst
.instruction
|= 0x000020; break;
14383 case 32: inst
.instruction
|= 0x000000; break;
14386 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
14387 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
14388 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
14389 inst
.instruction
|= neon_quad (rs
) << 21;
14390 /* The encoding for this instruction is identical for the ARM and Thumb
14391 variants, except for the condition field. */
14392 do_vfp_cond_or_thumb ();
14396 /* VMOV has particularly many variations. It can be one of:
14397 0. VMOV<c><q> <Qd>, <Qm>
14398 1. VMOV<c><q> <Dd>, <Dm>
14399 (Register operations, which are VORR with Rm = Rn.)
14400 2. VMOV<c><q>.<dt> <Qd>, #<imm>
14401 3. VMOV<c><q>.<dt> <Dd>, #<imm>
14403 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
14404 (ARM register to scalar.)
14405 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
14406 (Two ARM registers to vector.)
14407 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
14408 (Scalar to ARM register.)
14409 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
14410 (Vector to two ARM registers.)
14411 8. VMOV.F32 <Sd>, <Sm>
14412 9. VMOV.F64 <Dd>, <Dm>
14413 (VFP register moves.)
14414 10. VMOV.F32 <Sd>, #imm
14415 11. VMOV.F64 <Dd>, #imm
14416 (VFP float immediate load.)
14417 12. VMOV <Rd>, <Sm>
14418 (VFP single to ARM reg.)
14419 13. VMOV <Sd>, <Rm>
14420 (ARM reg to VFP single.)
14421 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
14422 (Two ARM regs to two VFP singles.)
14423 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
14424 (Two VFP singles to two ARM regs.)
14426 These cases can be disambiguated using neon_select_shape, except cases 1/9
14427 and 3/11 which depend on the operand type too.
14429 All the encoded bits are hardcoded by this function.
14431 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
14432 Cases 5, 7 may be used with VFPv2 and above.
14434 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
14435 can specify a type where it doesn't make sense to, and is ignored). */
14440 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
14441 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
14443 struct neon_type_el et
;
14444 const char *ldconst
= 0;
14448 case NS_DD
: /* case 1/9. */
14449 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
14450 /* It is not an error here if no type is given. */
14452 if (et
.type
== NT_float
&& et
.size
== 64)
14454 do_vfp_nsyn_opcode ("fcpyd");
14457 /* fall through. */
14459 case NS_QQ
: /* case 0/1. */
14461 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14463 /* The architecture manual I have doesn't explicitly state which
14464 value the U bit should have for register->register moves, but
14465 the equivalent VORR instruction has U = 0, so do that. */
14466 inst
.instruction
= 0x0200110;
14467 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14468 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14469 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14470 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14471 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14472 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14473 inst
.instruction
|= neon_quad (rs
) << 6;
14475 neon_dp_fixup (&inst
);
14479 case NS_DI
: /* case 3/11. */
14480 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
14482 if (et
.type
== NT_float
&& et
.size
== 64)
14484 /* case 11 (fconstd). */
14485 ldconst
= "fconstd";
14486 goto encode_fconstd
;
14488 /* fall through. */
14490 case NS_QI
: /* case 2/3. */
14491 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14493 inst
.instruction
= 0x0800010;
14494 neon_move_immediate ();
14495 neon_dp_fixup (&inst
);
14498 case NS_SR
: /* case 4. */
14500 unsigned bcdebits
= 0;
14502 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
14503 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
14505 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
14506 logsize
= neon_logbits (et
.size
);
14508 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
14510 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
14511 && et
.size
!= 32, _(BAD_FPU
));
14512 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
14513 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
14517 case 8: bcdebits
= 0x8; break;
14518 case 16: bcdebits
= 0x1; break;
14519 case 32: bcdebits
= 0x0; break;
14523 bcdebits
|= x
<< logsize
;
14525 inst
.instruction
= 0xe000b10;
14526 do_vfp_cond_or_thumb ();
14527 inst
.instruction
|= LOW4 (dn
) << 16;
14528 inst
.instruction
|= HI1 (dn
) << 7;
14529 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
14530 inst
.instruction
|= (bcdebits
& 3) << 5;
14531 inst
.instruction
|= (bcdebits
>> 2) << 21;
14535 case NS_DRR
: /* case 5 (fmdrr). */
14536 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
14539 inst
.instruction
= 0xc400b10;
14540 do_vfp_cond_or_thumb ();
14541 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
14542 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
14543 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
14544 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
14547 case NS_RS
: /* case 6. */
14550 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
14551 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
14552 unsigned abcdebits
= 0;
14554 et
= neon_check_type (2, NS_NULL
,
14555 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
14556 logsize
= neon_logbits (et
.size
);
14558 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
14560 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
14561 && et
.size
!= 32, _(BAD_FPU
));
14562 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
14563 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
14567 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
14568 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
14569 case 32: abcdebits
= 0x00; break;
14573 abcdebits
|= x
<< logsize
;
14574 inst
.instruction
= 0xe100b10;
14575 do_vfp_cond_or_thumb ();
14576 inst
.instruction
|= LOW4 (dn
) << 16;
14577 inst
.instruction
|= HI1 (dn
) << 7;
14578 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
14579 inst
.instruction
|= (abcdebits
& 3) << 5;
14580 inst
.instruction
|= (abcdebits
>> 2) << 21;
14584 case NS_RRD
: /* case 7 (fmrrd). */
14585 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
14588 inst
.instruction
= 0xc500b10;
14589 do_vfp_cond_or_thumb ();
14590 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
14591 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
14592 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14593 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14596 case NS_FF
: /* case 8 (fcpys). */
14597 do_vfp_nsyn_opcode ("fcpys");
14600 case NS_FI
: /* case 10 (fconsts). */
14601 ldconst
= "fconsts";
14603 if (is_quarter_float (inst
.operands
[1].imm
))
14605 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
14606 do_vfp_nsyn_opcode (ldconst
);
14609 first_error (_("immediate out of range"));
14612 case NS_RF
: /* case 12 (fmrs). */
14613 do_vfp_nsyn_opcode ("fmrs");
14616 case NS_FR
: /* case 13 (fmsr). */
14617 do_vfp_nsyn_opcode ("fmsr");
14620 /* The encoders for the fmrrs and fmsrr instructions expect three operands
14621 (one of which is a list), but we have parsed four. Do some fiddling to
14622 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
14624 case NS_RRFF
: /* case 14 (fmrrs). */
14625 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
14626 _("VFP registers must be adjacent"));
14627 inst
.operands
[2].imm
= 2;
14628 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
14629 do_vfp_nsyn_opcode ("fmrrs");
14632 case NS_FFRR
: /* case 15 (fmsrr). */
14633 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
14634 _("VFP registers must be adjacent"));
14635 inst
.operands
[1] = inst
.operands
[2];
14636 inst
.operands
[2] = inst
.operands
[3];
14637 inst
.operands
[0].imm
= 2;
14638 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
14639 do_vfp_nsyn_opcode ("fmsrr");
14648 do_neon_rshift_round_imm (void)
14650 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14651 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14652 int imm
= inst
.operands
[2].imm
;
14654 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
14657 inst
.operands
[2].present
= 0;
14662 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
14663 _("immediate out of range for shift"));
14664 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
14669 do_neon_movl (void)
14671 struct neon_type_el et
= neon_check_type (2, NS_QD
,
14672 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
14673 unsigned sizebits
= et
.size
>> 3;
14674 inst
.instruction
|= sizebits
<< 19;
14675 neon_two_same (0, et
.type
== NT_unsigned
, -1);
14681 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14682 struct neon_type_el et
= neon_check_type (2, rs
,
14683 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14684 NEON_ENCODE (INTEGER
, inst
);
14685 neon_two_same (neon_quad (rs
), 1, et
.size
);
14689 do_neon_zip_uzp (void)
14691 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14692 struct neon_type_el et
= neon_check_type (2, rs
,
14693 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14694 if (rs
== NS_DD
&& et
.size
== 32)
14696 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
14697 inst
.instruction
= N_MNEM_vtrn
;
14701 neon_two_same (neon_quad (rs
), 1, et
.size
);
14705 do_neon_sat_abs_neg (void)
14707 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14708 struct neon_type_el et
= neon_check_type (2, rs
,
14709 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
14710 neon_two_same (neon_quad (rs
), 1, et
.size
);
14714 do_neon_pair_long (void)
14716 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14717 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
14718 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
14719 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
14720 neon_two_same (neon_quad (rs
), 1, et
.size
);
14724 do_neon_recip_est (void)
14726 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14727 struct neon_type_el et
= neon_check_type (2, rs
,
14728 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
14729 inst
.instruction
|= (et
.type
== NT_float
) << 8;
14730 neon_two_same (neon_quad (rs
), 1, et
.size
);
14736 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14737 struct neon_type_el et
= neon_check_type (2, rs
,
14738 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
14739 neon_two_same (neon_quad (rs
), 1, et
.size
);
14745 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14746 struct neon_type_el et
= neon_check_type (2, rs
,
14747 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
14748 neon_two_same (neon_quad (rs
), 1, et
.size
);
14754 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14755 struct neon_type_el et
= neon_check_type (2, rs
,
14756 N_EQK
| N_INT
, N_8
| N_KEY
);
14757 neon_two_same (neon_quad (rs
), 1, et
.size
);
14763 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14764 neon_two_same (neon_quad (rs
), 1, -1);
14768 do_neon_tbl_tbx (void)
14770 unsigned listlenbits
;
14771 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
14773 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
14775 first_error (_("bad list length for table lookup"));
14779 listlenbits
= inst
.operands
[1].imm
- 1;
14780 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14781 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14782 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14783 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14784 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14785 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14786 inst
.instruction
|= listlenbits
<< 8;
14788 neon_dp_fixup (&inst
);
14792 do_neon_ldm_stm (void)
14794 /* P, U and L bits are part of bitmask. */
14795 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
14796 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
14798 if (inst
.operands
[1].issingle
)
14800 do_vfp_nsyn_ldm_stm (is_dbmode
);
14804 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
14805 _("writeback (!) must be used for VLDMDB and VSTMDB"));
14807 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14808 _("register list must contain at least 1 and at most 16 "
14811 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
14812 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
14813 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
14814 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
14816 inst
.instruction
|= offsetbits
;
14818 do_vfp_cond_or_thumb ();
14822 do_neon_ldr_str (void)
14824 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
14826 if (inst
.operands
[0].issingle
)
14829 do_vfp_nsyn_opcode ("flds");
14831 do_vfp_nsyn_opcode ("fsts");
14836 do_vfp_nsyn_opcode ("fldd");
14838 do_vfp_nsyn_opcode ("fstd");
14842 /* "interleave" version also handles non-interleaving register VLD1/VST1
14846 do_neon_ld_st_interleave (void)
14848 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
14849 N_8
| N_16
| N_32
| N_64
);
14850 unsigned alignbits
= 0;
14852 /* The bits in this table go:
14853 0: register stride of one (0) or two (1)
14854 1,2: register list length, minus one (1, 2, 3, 4).
14855 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
14856 We use -1 for invalid entries. */
14857 const int typetable
[] =
14859 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
14860 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
14861 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
14862 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
14866 if (et
.type
== NT_invtype
)
14869 if (inst
.operands
[1].immisalign
)
14870 switch (inst
.operands
[1].imm
>> 8)
14872 case 64: alignbits
= 1; break;
14874 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
14875 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
14876 goto bad_alignment
;
14880 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
14881 goto bad_alignment
;
14886 first_error (_("bad alignment"));
14890 inst
.instruction
|= alignbits
<< 4;
14891 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14893 /* Bits [4:6] of the immediate in a list specifier encode register stride
14894 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
14895 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
14896 up the right value for "type" in a table based on this value and the given
14897 list style, then stick it back. */
14898 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
14899 | (((inst
.instruction
>> 8) & 3) << 3);
14901 typebits
= typetable
[idx
];
14903 constraint (typebits
== -1, _("bad list type for instruction"));
14905 inst
.instruction
&= ~0xf00;
14906 inst
.instruction
|= typebits
<< 8;
14909 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
14910 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
14911 otherwise. The variable arguments are a list of pairs of legal (size, align)
14912 values, terminated with -1. */
14915 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
14918 int result
= FAIL
, thissize
, thisalign
;
14920 if (!inst
.operands
[1].immisalign
)
14926 va_start (ap
, do_align
);
14930 thissize
= va_arg (ap
, int);
14931 if (thissize
== -1)
14933 thisalign
= va_arg (ap
, int);
14935 if (size
== thissize
&& align
== thisalign
)
14938 while (result
!= SUCCESS
);
14942 if (result
== SUCCESS
)
14945 first_error (_("unsupported alignment for instruction"));
14951 do_neon_ld_st_lane (void)
14953 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
14954 int align_good
, do_align
= 0;
14955 int logsize
= neon_logbits (et
.size
);
14956 int align
= inst
.operands
[1].imm
>> 8;
14957 int n
= (inst
.instruction
>> 8) & 3;
14958 int max_el
= 64 / et
.size
;
14960 if (et
.type
== NT_invtype
)
14963 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
14964 _("bad list length"));
14965 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
14966 _("scalar index out of range"));
14967 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
14969 _("stride of 2 unavailable when element size is 8"));
14973 case 0: /* VLD1 / VST1. */
14974 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
14976 if (align_good
== FAIL
)
14980 unsigned alignbits
= 0;
14983 case 16: alignbits
= 0x1; break;
14984 case 32: alignbits
= 0x3; break;
14987 inst
.instruction
|= alignbits
<< 4;
14991 case 1: /* VLD2 / VST2. */
14992 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
14994 if (align_good
== FAIL
)
14997 inst
.instruction
|= 1 << 4;
15000 case 2: /* VLD3 / VST3. */
15001 constraint (inst
.operands
[1].immisalign
,
15002 _("can't use alignment with this instruction"));
15005 case 3: /* VLD4 / VST4. */
15006 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
15007 16, 64, 32, 64, 32, 128, -1);
15008 if (align_good
== FAIL
)
15012 unsigned alignbits
= 0;
15015 case 8: alignbits
= 0x1; break;
15016 case 16: alignbits
= 0x1; break;
15017 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
15020 inst
.instruction
|= alignbits
<< 4;
15027 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
15028 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
15029 inst
.instruction
|= 1 << (4 + logsize
);
15031 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
15032 inst
.instruction
|= logsize
<< 10;
15035 /* Encode single n-element structure to all lanes VLD<n> instructions. */
15038 do_neon_ld_dup (void)
15040 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
15041 int align_good
, do_align
= 0;
15043 if (et
.type
== NT_invtype
)
15046 switch ((inst
.instruction
>> 8) & 3)
15048 case 0: /* VLD1. */
15049 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
15050 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
15051 &do_align
, 16, 16, 32, 32, -1);
15052 if (align_good
== FAIL
)
15054 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
15057 case 2: inst
.instruction
|= 1 << 5; break;
15058 default: first_error (_("bad list length")); return;
15060 inst
.instruction
|= neon_logbits (et
.size
) << 6;
15063 case 1: /* VLD2. */
15064 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
15065 &do_align
, 8, 16, 16, 32, 32, 64, -1);
15066 if (align_good
== FAIL
)
15068 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
15069 _("bad list length"));
15070 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
15071 inst
.instruction
|= 1 << 5;
15072 inst
.instruction
|= neon_logbits (et
.size
) << 6;
15075 case 2: /* VLD3. */
15076 constraint (inst
.operands
[1].immisalign
,
15077 _("can't use alignment with this instruction"));
15078 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
15079 _("bad list length"));
15080 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
15081 inst
.instruction
|= 1 << 5;
15082 inst
.instruction
|= neon_logbits (et
.size
) << 6;
15085 case 3: /* VLD4. */
15087 int align
= inst
.operands
[1].imm
>> 8;
15088 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
15089 16, 64, 32, 64, 32, 128, -1);
15090 if (align_good
== FAIL
)
15092 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
15093 _("bad list length"));
15094 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
15095 inst
.instruction
|= 1 << 5;
15096 if (et
.size
== 32 && align
== 128)
15097 inst
.instruction
|= 0x3 << 6;
15099 inst
.instruction
|= neon_logbits (et
.size
) << 6;
15106 inst
.instruction
|= do_align
<< 4;
15109 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
15110 apart from bits [11:4]. */
15113 do_neon_ldx_stx (void)
15115 if (inst
.operands
[1].isreg
)
15116 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
15118 switch (NEON_LANE (inst
.operands
[0].imm
))
15120 case NEON_INTERLEAVE_LANES
:
15121 NEON_ENCODE (INTERLV
, inst
);
15122 do_neon_ld_st_interleave ();
15125 case NEON_ALL_LANES
:
15126 NEON_ENCODE (DUP
, inst
);
15131 NEON_ENCODE (LANE
, inst
);
15132 do_neon_ld_st_lane ();
15135 /* L bit comes from bit mask. */
15136 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15137 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15138 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
15140 if (inst
.operands
[1].postind
)
15142 int postreg
= inst
.operands
[1].imm
& 0xf;
15143 constraint (!inst
.operands
[1].immisreg
,
15144 _("post-index must be a register"));
15145 constraint (postreg
== 0xd || postreg
== 0xf,
15146 _("bad register for post-index"));
15147 inst
.instruction
|= postreg
;
15149 else if (inst
.operands
[1].writeback
)
15151 inst
.instruction
|= 0xd;
15154 inst
.instruction
|= 0xf;
15157 inst
.instruction
|= 0xf9000000;
15159 inst
.instruction
|= 0xf4000000;
15162 /* Overall per-instruction processing. */
15164 /* We need to be able to fix up arbitrary expressions in some statements.
15165 This is so that we can handle symbols that are an arbitrary distance from
15166 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
15167 which returns part of an address in a form which will be valid for
15168 a data instruction. We do this by pushing the expression into a symbol
15169 in the expr_section, and creating a fix for that. */
15172 fix_new_arm (fragS
* frag
,
15187 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
15188 (enum bfd_reloc_code_real
) reloc
);
15192 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
15193 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
15197 /* Mark whether the fix is to a THUMB instruction, or an ARM
15199 new_fix
->tc_fix_data
= thumb_mode
;
15202 /* Create a frg for an instruction requiring relaxation. */
15204 output_relax_insn (void)
15210 /* The size of the instruction is unknown, so tie the debug info to the
15211 start of the instruction. */
15212 dwarf2_emit_insn (0);
15214 switch (inst
.reloc
.exp
.X_op
)
15217 sym
= inst
.reloc
.exp
.X_add_symbol
;
15218 offset
= inst
.reloc
.exp
.X_add_number
;
15222 offset
= inst
.reloc
.exp
.X_add_number
;
15225 sym
= make_expr_symbol (&inst
.reloc
.exp
);
15229 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
15230 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
15231 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
15234 /* Write a 32-bit thumb instruction to buf. */
15236 put_thumb32_insn (char * buf
, unsigned long insn
)
15238 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
15239 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
15243 output_inst (const char * str
)
15249 as_bad ("%s -- `%s'", inst
.error
, str
);
15254 output_relax_insn ();
15257 if (inst
.size
== 0)
15260 to
= frag_more (inst
.size
);
15261 /* PR 9814: Record the thumb mode into the current frag so that we know
15262 what type of NOP padding to use, if necessary. We override any previous
15263 setting so that if the mode has changed then the NOPS that we use will
15264 match the encoding of the last instruction in the frag. */
15265 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
15267 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
15269 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
15270 put_thumb32_insn (to
, inst
.instruction
);
15272 else if (inst
.size
> INSN_SIZE
)
15274 gas_assert (inst
.size
== (2 * INSN_SIZE
));
15275 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
15276 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
15279 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
15281 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
15282 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
15283 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
15286 dwarf2_emit_insn (inst
.size
);
15290 output_it_inst (int cond
, int mask
, char * to
)
15292 unsigned long instruction
= 0xbf00;
15295 instruction
|= mask
;
15296 instruction
|= cond
<< 4;
15300 to
= frag_more (2);
15302 dwarf2_emit_insn (2);
15306 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
15345 /* Subroutine of md_assemble, responsible for looking up the primary
15346 opcode from the mnemonic the user wrote. STR points to the
15347 beginning of the mnemonic.
15349 This is not simply a hash table lookup, because of conditional
15350 variants. Most instructions have conditional variants, which are
15351 expressed with a _conditional affix_ to the mnemonic. If we were
15352 to encode each conditional variant as a literal string in the opcode
15353 table, it would have approximately 20,000 entries.
15355 Most mnemonics take this affix as a suffix, and in unified syntax,
15356 'most' is upgraded to 'all'. However, in the divided syntax, some
15357 instructions take the affix as an infix, notably the s-variants of
15358 the arithmetic instructions. Of those instructions, all but six
15359 have the infix appear after the third character of the mnemonic.
15361 Accordingly, the algorithm for looking up primary opcodes given
15364 1. Look up the identifier in the opcode table.
15365 If we find a match, go to step U.
15367 2. Look up the last two characters of the identifier in the
15368 conditions table. If we find a match, look up the first N-2
15369 characters of the identifier in the opcode table. If we
15370 find a match, go to step CE.
15372 3. Look up the fourth and fifth characters of the identifier in
15373 the conditions table. If we find a match, extract those
15374 characters from the identifier, and look up the remaining
15375 characters in the opcode table. If we find a match, go
15380 U. Examine the tag field of the opcode structure, in case this is
15381 one of the six instructions with its conditional infix in an
15382 unusual place. If it is, the tag tells us where to find the
15383 infix; look it up in the conditions table and set inst.cond
15384 accordingly. Otherwise, this is an unconditional instruction.
15385 Again set inst.cond accordingly. Return the opcode structure.
15387 CE. Examine the tag field to make sure this is an instruction that
15388 should receive a conditional suffix. If it is not, fail.
15389 Otherwise, set inst.cond from the suffix we already looked up,
15390 and return the opcode structure.
15392 CM. Examine the tag field to make sure this is an instruction that
15393 should receive a conditional infix after the third character.
15394 If it is not, fail. Otherwise, undo the edits to the current
15395 line of input and proceed as for case CE. */
15397 static const struct asm_opcode
*
15398 opcode_lookup (char **str
)
15402 const struct asm_opcode
*opcode
;
15403 const struct asm_cond
*cond
;
15406 /* Scan up to the end of the mnemonic, which must end in white space,
15407 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
15408 for (base
= end
= *str
; *end
!= '\0'; end
++)
15409 if (*end
== ' ' || *end
== '.')
15415 /* Handle a possible width suffix and/or Neon type suffix. */
15420 /* The .w and .n suffixes are only valid if the unified syntax is in
15422 if (unified_syntax
&& end
[1] == 'w')
15424 else if (unified_syntax
&& end
[1] == 'n')
15429 inst
.vectype
.elems
= 0;
15431 *str
= end
+ offset
;
15433 if (end
[offset
] == '.')
15435 /* See if we have a Neon type suffix (possible in either unified or
15436 non-unified ARM syntax mode). */
15437 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
15440 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
15446 /* Look for unaffixed or special-case affixed mnemonic. */
15447 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
15452 if (opcode
->tag
< OT_odd_infix_0
)
15454 inst
.cond
= COND_ALWAYS
;
15458 if (warn_on_deprecated
&& unified_syntax
)
15459 as_warn (_("conditional infixes are deprecated in unified syntax"));
15460 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
15461 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
15464 inst
.cond
= cond
->value
;
15468 /* Cannot have a conditional suffix on a mnemonic of less than two
15470 if (end
- base
< 3)
15473 /* Look for suffixed mnemonic. */
15475 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
15476 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
15478 if (opcode
&& cond
)
15481 switch (opcode
->tag
)
15483 case OT_cinfix3_legacy
:
15484 /* Ignore conditional suffixes matched on infix only mnemonics. */
15488 case OT_cinfix3_deprecated
:
15489 case OT_odd_infix_unc
:
15490 if (!unified_syntax
)
15492 /* else fall through */
15496 case OT_csuf_or_in3
:
15497 inst
.cond
= cond
->value
;
15500 case OT_unconditional
:
15501 case OT_unconditionalF
:
15503 inst
.cond
= cond
->value
;
15506 /* Delayed diagnostic. */
15507 inst
.error
= BAD_COND
;
15508 inst
.cond
= COND_ALWAYS
;
15517 /* Cannot have a usual-position infix on a mnemonic of less than
15518 six characters (five would be a suffix). */
15519 if (end
- base
< 6)
15522 /* Look for infixed mnemonic in the usual position. */
15524 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
15528 memcpy (save
, affix
, 2);
15529 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
15530 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
15532 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
15533 memcpy (affix
, save
, 2);
15536 && (opcode
->tag
== OT_cinfix3
15537 || opcode
->tag
== OT_cinfix3_deprecated
15538 || opcode
->tag
== OT_csuf_or_in3
15539 || opcode
->tag
== OT_cinfix3_legacy
))
15542 if (warn_on_deprecated
&& unified_syntax
15543 && (opcode
->tag
== OT_cinfix3
15544 || opcode
->tag
== OT_cinfix3_deprecated
))
15545 as_warn (_("conditional infixes are deprecated in unified syntax"));
15547 inst
.cond
= cond
->value
;
15554 /* This function generates an initial IT instruction, leaving its block
15555 virtually open for the new instructions. Eventually,
15556 the mask will be updated by now_it_add_mask () each time
15557 a new instruction needs to be included in the IT block.
15558 Finally, the block is closed with close_automatic_it_block ().
15559 The block closure can be requested either from md_assemble (),
15560 a tencode (), or due to a label hook. */
15563 new_automatic_it_block (int cond
)
15565 now_it
.state
= AUTOMATIC_IT_BLOCK
;
15566 now_it
.mask
= 0x18;
15568 now_it
.block_length
= 1;
15569 mapping_state (MAP_THUMB
);
15570 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
15573 /* Close an automatic IT block.
15574 See comments in new_automatic_it_block (). */
15577 close_automatic_it_block (void)
15579 now_it
.mask
= 0x10;
15580 now_it
.block_length
= 0;
15583 /* Update the mask of the current automatically-generated IT
15584 instruction. See comments in new_automatic_it_block (). */
15587 now_it_add_mask (int cond
)
15589 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
15590 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
15591 | ((bitvalue) << (nbit)))
15592 const int resulting_bit
= (cond
& 1);
15594 now_it
.mask
&= 0xf;
15595 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
15597 (5 - now_it
.block_length
));
15598 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
15600 ((5 - now_it
.block_length
) - 1) );
15601 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
15604 #undef SET_BIT_VALUE
15607 /* The IT blocks handling machinery is accessed through the these functions:
15608 it_fsm_pre_encode () from md_assemble ()
15609 set_it_insn_type () optional, from the tencode functions
15610 set_it_insn_type_last () ditto
15611 in_it_block () ditto
15612 it_fsm_post_encode () from md_assemble ()
15613 force_automatic_it_block_close () from label handling functions
15616 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
15617 initializing the IT insn type with a generic initial value depending
15618 on the inst.condition.
15619 2) During the tencode function, two things may happen:
15620 a) The tencode function overrides the IT insn type by
15621 calling either set_it_insn_type (type) or set_it_insn_type_last ().
15622 b) The tencode function queries the IT block state by
15623 calling in_it_block () (i.e. to determine narrow/not narrow mode).
15625 Both set_it_insn_type and in_it_block run the internal FSM state
15626 handling function (handle_it_state), because: a) setting the IT insn
15627 type may incur in an invalid state (exiting the function),
15628 and b) querying the state requires the FSM to be updated.
15629 Specifically we want to avoid creating an IT block for conditional
15630 branches, so it_fsm_pre_encode is actually a guess and we can't
15631 determine whether an IT block is required until the tencode () routine
15632 has decided what type of instruction this actually is.
15633 Because of this, if set_it_insn_type and in_it_block have to be used,
15634 set_it_insn_type has to be called first.
15636 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
15637 determines the insn IT type depending on the inst.cond code.
15638 When a tencode () routine encodes an instruction that can be
15639 either outside an IT block, or, in the case of being inside, has to be
15640 the last one, set_it_insn_type_last () will determine the proper
15641 IT instruction type based on the inst.cond code. Otherwise,
15642 set_it_insn_type can be called for overriding that logic or
15643 for covering other cases.
15645 Calling handle_it_state () may not transition the IT block state to
15646 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
15647 still queried. Instead, if the FSM determines that the state should
15648 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
15649 after the tencode () function: that's what it_fsm_post_encode () does.
15651 Since in_it_block () calls the state handling function to get an
15652 updated state, an error may occur (due to invalid insns combination).
15653 In that case, inst.error is set.
15654 Therefore, inst.error has to be checked after the execution of
15655 the tencode () routine.
15657 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
15658 any pending state change (if any) that didn't take place in
15659 handle_it_state () as explained above. */
15662 it_fsm_pre_encode (void)
15664 if (inst
.cond
!= COND_ALWAYS
)
15665 inst
.it_insn_type
= INSIDE_IT_INSN
;
15667 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
15669 now_it
.state_handled
= 0;
15672 /* IT state FSM handling function. */
15675 handle_it_state (void)
15677 now_it
.state_handled
= 1;
15679 switch (now_it
.state
)
15681 case OUTSIDE_IT_BLOCK
:
15682 switch (inst
.it_insn_type
)
15684 case OUTSIDE_IT_INSN
:
15687 case INSIDE_IT_INSN
:
15688 case INSIDE_IT_LAST_INSN
:
15689 if (thumb_mode
== 0)
15692 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
15693 as_tsktsk (_("Warning: conditional outside an IT block"\
15698 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
15699 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_arch_t2
))
15701 /* Automatically generate the IT instruction. */
15702 new_automatic_it_block (inst
.cond
);
15703 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
15704 close_automatic_it_block ();
15708 inst
.error
= BAD_OUT_IT
;
15714 case IF_INSIDE_IT_LAST_INSN
:
15715 case NEUTRAL_IT_INSN
:
15719 now_it
.state
= MANUAL_IT_BLOCK
;
15720 now_it
.block_length
= 0;
15725 case AUTOMATIC_IT_BLOCK
:
15726 /* Three things may happen now:
15727 a) We should increment current it block size;
15728 b) We should close current it block (closing insn or 4 insns);
15729 c) We should close current it block and start a new one (due
15730 to incompatible conditions or
15731 4 insns-length block reached). */
15733 switch (inst
.it_insn_type
)
15735 case OUTSIDE_IT_INSN
:
15736 /* The closure of the block shall happen immediatelly,
15737 so any in_it_block () call reports the block as closed. */
15738 force_automatic_it_block_close ();
15741 case INSIDE_IT_INSN
:
15742 case INSIDE_IT_LAST_INSN
:
15743 case IF_INSIDE_IT_LAST_INSN
:
15744 now_it
.block_length
++;
15746 if (now_it
.block_length
> 4
15747 || !now_it_compatible (inst
.cond
))
15749 force_automatic_it_block_close ();
15750 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
15751 new_automatic_it_block (inst
.cond
);
15755 now_it_add_mask (inst
.cond
);
15758 if (now_it
.state
== AUTOMATIC_IT_BLOCK
15759 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
15760 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
15761 close_automatic_it_block ();
15764 case NEUTRAL_IT_INSN
:
15765 now_it
.block_length
++;
15767 if (now_it
.block_length
> 4)
15768 force_automatic_it_block_close ();
15770 now_it_add_mask (now_it
.cc
& 1);
15774 close_automatic_it_block ();
15775 now_it
.state
= MANUAL_IT_BLOCK
;
15780 case MANUAL_IT_BLOCK
:
15782 /* Check conditional suffixes. */
15783 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
15786 now_it
.mask
&= 0x1f;
15787 is_last
= (now_it
.mask
== 0x10);
15789 switch (inst
.it_insn_type
)
15791 case OUTSIDE_IT_INSN
:
15792 inst
.error
= BAD_NOT_IT
;
15795 case INSIDE_IT_INSN
:
15796 if (cond
!= inst
.cond
)
15798 inst
.error
= BAD_IT_COND
;
15803 case INSIDE_IT_LAST_INSN
:
15804 case IF_INSIDE_IT_LAST_INSN
:
15805 if (cond
!= inst
.cond
)
15807 inst
.error
= BAD_IT_COND
;
15812 inst
.error
= BAD_BRANCH
;
15817 case NEUTRAL_IT_INSN
:
15818 /* The BKPT instruction is unconditional even in an IT block. */
15822 inst
.error
= BAD_IT_IT
;
15833 it_fsm_post_encode (void)
15837 if (!now_it
.state_handled
)
15838 handle_it_state ();
15840 is_last
= (now_it
.mask
== 0x10);
15843 now_it
.state
= OUTSIDE_IT_BLOCK
;
15849 force_automatic_it_block_close (void)
15851 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
15853 close_automatic_it_block ();
15854 now_it
.state
= OUTSIDE_IT_BLOCK
;
15862 if (!now_it
.state_handled
)
15863 handle_it_state ();
15865 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
15869 md_assemble (char *str
)
15872 const struct asm_opcode
* opcode
;
15874 /* Align the previous label if needed. */
15875 if (last_label_seen
!= NULL
)
15877 symbol_set_frag (last_label_seen
, frag_now
);
15878 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
15879 S_SET_SEGMENT (last_label_seen
, now_seg
);
15882 memset (&inst
, '\0', sizeof (inst
));
15883 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
15885 opcode
= opcode_lookup (&p
);
15888 /* It wasn't an instruction, but it might be a register alias of
15889 the form alias .req reg, or a Neon .dn/.qn directive. */
15890 if (! create_register_alias (str
, p
)
15891 && ! create_neon_reg_alias (str
, p
))
15892 as_bad (_("bad instruction `%s'"), str
);
15897 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
15898 as_warn (_("s suffix on comparison instruction is deprecated"));
15900 /* The value which unconditional instructions should have in place of the
15901 condition field. */
15902 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
15906 arm_feature_set variant
;
15908 variant
= cpu_variant
;
15909 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
15910 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
15911 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
15912 /* Check that this instruction is supported for this CPU. */
15913 if (!opcode
->tvariant
15914 || (thumb_mode
== 1
15915 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
15917 as_bad (_("selected processor does not support Thumb mode `%s'"), str
);
15920 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
15921 && opcode
->tencode
!= do_t_branch
)
15923 as_bad (_("Thumb does not support conditional execution"));
15927 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
))
15929 if (opcode
->tencode
!= do_t_blx
&& opcode
->tencode
!= do_t_branch23
15930 && !(ARM_CPU_HAS_FEATURE(*opcode
->tvariant
, arm_ext_msr
)
15931 || ARM_CPU_HAS_FEATURE(*opcode
->tvariant
, arm_ext_barrier
)))
15933 /* Two things are addressed here.
15934 1) Implicit require narrow instructions on Thumb-1.
15935 This avoids relaxation accidentally introducing Thumb-2
15937 2) Reject wide instructions in non Thumb-2 cores. */
15938 if (inst
.size_req
== 0)
15940 else if (inst
.size_req
== 4)
15942 as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str
);
15948 inst
.instruction
= opcode
->tvalue
;
15950 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
15952 /* Prepare the it_insn_type for those encodings that don't set
15954 it_fsm_pre_encode ();
15956 opcode
->tencode ();
15958 it_fsm_post_encode ();
15961 if (!(inst
.error
|| inst
.relax
))
15963 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
15964 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
15965 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
15967 as_bad (_("cannot honor width suffix -- `%s'"), str
);
15972 /* Something has gone badly wrong if we try to relax a fixed size
15974 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
15976 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
15977 *opcode
->tvariant
);
15978 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
15979 set those bits when Thumb-2 32-bit instructions are seen. ie.
15980 anything other than bl/blx and v6-M instructions.
15981 This is overly pessimistic for relaxable instructions. */
15982 if (((inst
.size
== 4 && (inst
.instruction
& 0xf800e800) != 0xf000e800)
15984 && !(ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
15985 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
)))
15986 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
15989 check_neon_suffixes
;
15993 mapping_state (MAP_THUMB
);
15996 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
16000 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
16001 is_bx
= (opcode
->aencode
== do_bx
);
16003 /* Check that this instruction is supported for this CPU. */
16004 if (!(is_bx
&& fix_v4bx
)
16005 && !(opcode
->avariant
&&
16006 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
16008 as_bad (_("selected processor does not support ARM mode `%s'"), str
);
16013 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
16017 inst
.instruction
= opcode
->avalue
;
16018 if (opcode
->tag
== OT_unconditionalF
)
16019 inst
.instruction
|= 0xF << 28;
16021 inst
.instruction
|= inst
.cond
<< 28;
16022 inst
.size
= INSN_SIZE
;
16023 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
16025 it_fsm_pre_encode ();
16026 opcode
->aencode ();
16027 it_fsm_post_encode ();
16029 /* Arm mode bx is marked as both v4T and v5 because it's still required
16030 on a hypothetical non-thumb v5 core. */
16032 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
16034 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
16035 *opcode
->avariant
);
16037 check_neon_suffixes
;
16041 mapping_state (MAP_ARM
);
16046 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
16054 check_it_blocks_finished (void)
16059 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
16060 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
16061 == MANUAL_IT_BLOCK
)
16063 as_warn (_("section '%s' finished with an open IT block."),
16067 if (now_it
.state
== MANUAL_IT_BLOCK
)
16068 as_warn (_("file finished with an open IT block."));
16072 /* Various frobbings of labels and their addresses. */
16075 arm_start_line_hook (void)
16077 last_label_seen
= NULL
;
16081 arm_frob_label (symbolS
* sym
)
16083 last_label_seen
= sym
;
16085 ARM_SET_THUMB (sym
, thumb_mode
);
16087 #if defined OBJ_COFF || defined OBJ_ELF
16088 ARM_SET_INTERWORK (sym
, support_interwork
);
16091 force_automatic_it_block_close ();
16093 /* Note - do not allow local symbols (.Lxxx) to be labelled
16094 as Thumb functions. This is because these labels, whilst
16095 they exist inside Thumb code, are not the entry points for
16096 possible ARM->Thumb calls. Also, these labels can be used
16097 as part of a computed goto or switch statement. eg gcc
16098 can generate code that looks like this:
16100 ldr r2, [pc, .Laaa]
16110 The first instruction loads the address of the jump table.
16111 The second instruction converts a table index into a byte offset.
16112 The third instruction gets the jump address out of the table.
16113 The fourth instruction performs the jump.
16115 If the address stored at .Laaa is that of a symbol which has the
16116 Thumb_Func bit set, then the linker will arrange for this address
16117 to have the bottom bit set, which in turn would mean that the
16118 address computation performed by the third instruction would end
16119 up with the bottom bit set. Since the ARM is capable of unaligned
16120 word loads, the instruction would then load the incorrect address
16121 out of the jump table, and chaos would ensue. */
16122 if (label_is_thumb_function_name
16123 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
16124 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
16126 /* When the address of a Thumb function is taken the bottom
16127 bit of that address should be set. This will allow
16128 interworking between Arm and Thumb functions to work
16131 THUMB_SET_FUNC (sym
, 1);
16133 label_is_thumb_function_name
= FALSE
;
16136 dwarf2_emit_label (sym
);
16140 arm_data_in_code (void)
16142 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
16144 *input_line_pointer
= '/';
16145 input_line_pointer
+= 5;
16146 *input_line_pointer
= 0;
16154 arm_canonicalize_symbol_name (char * name
)
16158 if (thumb_mode
&& (len
= strlen (name
)) > 5
16159 && streq (name
+ len
- 5, "/data"))
16160 *(name
+ len
- 5) = 0;
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF builds one reg_entry initializer: stringized name, number,
   register type; the TRUE marks it as a built-in (non-.req) name.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* REGNUM pastes prefix and number to form names like "r7"; REGNUM2
   doubles the number (used for Neon Q registers, which alias D pairs).  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* REGSET expands to entries 0..15; REGSETH to 16..31; REGSET2 to
   sixteen REGNUM2 entries.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
16189 static const struct reg_entry reg_names
[] =
16191 /* ARM integer registers. */
16192 REGSET(r
, RN
), REGSET(R
, RN
),
16194 /* ATPCS synonyms. */
16195 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
16196 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
16197 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
16199 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
16200 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
16201 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
16203 /* Well-known aliases. */
16204 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
16205 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
16207 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
16208 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
16210 /* Coprocessor numbers. */
16211 REGSET(p
, CP
), REGSET(P
, CP
),
16213 /* Coprocessor register numbers. The "cr" variants are for backward
16215 REGSET(c
, CN
), REGSET(C
, CN
),
16216 REGSET(cr
, CN
), REGSET(CR
, CN
),
16218 /* FPA registers. */
16219 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
16220 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
16222 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
16223 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
16225 /* VFP SP registers. */
16226 REGSET(s
,VFS
), REGSET(S
,VFS
),
16227 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
16229 /* VFP DP Registers. */
16230 REGSET(d
,VFD
), REGSET(D
,VFD
),
16231 /* Extra Neon DP registers. */
16232 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
16234 /* Neon QP registers. */
16235 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
16237 /* VFP control registers. */
16238 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
16239 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
16240 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
16241 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
16242 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
16243 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
16245 /* Maverick DSP coprocessor registers. */
16246 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
16247 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
16249 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
16250 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
16251 REGDEF(dspsc
,0,DSPSC
),
16253 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
16254 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
16255 REGDEF(DSPSC
,0,DSPSC
),
16257 /* iWMMXt data registers - p0, c0-15. */
16258 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
16260 /* iWMMXt control registers - p1, c0-3. */
16261 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
16262 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
16263 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
16264 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
16266 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
16267 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
16268 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
16269 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
16270 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
16272 /* XScale accumulator registers. */
16273 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
16279 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
16280 within psr_required_here. */
16281 static const struct asm_psr psrs
[] =
16283 /* Backward compatibility notation. Note that "all" is no longer
16284 truly all possible PSR bits. */
16285 {"all", PSR_c
| PSR_f
},
16289 /* Individual flags. */
16296 /* Combinations of flags. */
16297 {"fs", PSR_f
| PSR_s
},
16298 {"fx", PSR_f
| PSR_x
},
16299 {"fc", PSR_f
| PSR_c
},
16300 {"sf", PSR_s
| PSR_f
},
16301 {"sx", PSR_s
| PSR_x
},
16302 {"sc", PSR_s
| PSR_c
},
16303 {"xf", PSR_x
| PSR_f
},
16304 {"xs", PSR_x
| PSR_s
},
16305 {"xc", PSR_x
| PSR_c
},
16306 {"cf", PSR_c
| PSR_f
},
16307 {"cs", PSR_c
| PSR_s
},
16308 {"cx", PSR_c
| PSR_x
},
16309 {"fsx", PSR_f
| PSR_s
| PSR_x
},
16310 {"fsc", PSR_f
| PSR_s
| PSR_c
},
16311 {"fxs", PSR_f
| PSR_x
| PSR_s
},
16312 {"fxc", PSR_f
| PSR_x
| PSR_c
},
16313 {"fcs", PSR_f
| PSR_c
| PSR_s
},
16314 {"fcx", PSR_f
| PSR_c
| PSR_x
},
16315 {"sfx", PSR_s
| PSR_f
| PSR_x
},
16316 {"sfc", PSR_s
| PSR_f
| PSR_c
},
16317 {"sxf", PSR_s
| PSR_x
| PSR_f
},
16318 {"sxc", PSR_s
| PSR_x
| PSR_c
},
16319 {"scf", PSR_s
| PSR_c
| PSR_f
},
16320 {"scx", PSR_s
| PSR_c
| PSR_x
},
16321 {"xfs", PSR_x
| PSR_f
| PSR_s
},
16322 {"xfc", PSR_x
| PSR_f
| PSR_c
},
16323 {"xsf", PSR_x
| PSR_s
| PSR_f
},
16324 {"xsc", PSR_x
| PSR_s
| PSR_c
},
16325 {"xcf", PSR_x
| PSR_c
| PSR_f
},
16326 {"xcs", PSR_x
| PSR_c
| PSR_s
},
16327 {"cfs", PSR_c
| PSR_f
| PSR_s
},
16328 {"cfx", PSR_c
| PSR_f
| PSR_x
},
16329 {"csf", PSR_c
| PSR_s
| PSR_f
},
16330 {"csx", PSR_c
| PSR_s
| PSR_x
},
16331 {"cxf", PSR_c
| PSR_x
| PSR_f
},
16332 {"cxs", PSR_c
| PSR_x
| PSR_s
},
16333 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
16334 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
16335 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
16336 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
16337 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
16338 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
16339 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
16340 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
16341 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
16342 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
16343 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
16344 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
16345 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
16346 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
16347 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
16348 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
16349 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
16350 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
16351 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
16352 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
16353 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
16354 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
16355 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
16356 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
16360 {"nzcvqg", PSR_s
| PSR_f
}
16363 /* Table of V7M psr names. */
16364 static const struct asm_psr v7m_psrs
[] =
16366 {"apsr", 0 }, {"APSR", 0 },
16367 {"iapsr", 1 }, {"IAPSR", 1 },
16368 {"eapsr", 2 }, {"EAPSR", 2 },
16369 {"psr", 3 }, {"PSR", 3 },
16370 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
16371 {"ipsr", 5 }, {"IPSR", 5 },
16372 {"epsr", 6 }, {"EPSR", 6 },
16373 {"iepsr", 7 }, {"IEPSR", 7 },
16374 {"msp", 8 }, {"MSP", 8 },
16375 {"psp", 9 }, {"PSP", 9 },
16376 {"primask", 16}, {"PRIMASK", 16},
16377 {"basepri", 17}, {"BASEPRI", 17},
16378 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
16379 {"faultmask", 19}, {"FAULTMASK", 19},
16380 {"control", 20}, {"CONTROL", 20}
16383 /* Table of all shift-in-operand names. */
16384 static const struct asm_shift_name shift_names
[] =
16386 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
16387 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
16388 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
16389 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
16390 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
16391 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
16394 /* Table of all explicit relocation names. */
16396 static struct reloc_entry reloc_names
[] =
16398 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
16399 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
16400 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
16401 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
16402 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
16403 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
16404 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
16405 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
16406 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
16407 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
16408 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
16409 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
}
16413 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
16414 static const struct asm_cond conds
[] =
16418 {"cs", 0x2}, {"hs", 0x2},
16419 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
16433 static struct asm_barrier_opt barrier_opt_names
[] =
16435 { "sy", 0xf }, { "SY", 0xf },
16436 { "un", 0x7 }, { "UN", 0x7 },
16437 { "st", 0xe }, { "ST", 0xe },
16438 { "unst", 0x6 }, { "UNST", 0x6 },
16439 { "ish", 0xb }, { "ISH", 0xb },
16440 { "sh", 0xb }, { "SH", 0xb },
16441 { "ishst", 0xa }, { "ISHST", 0xa },
16442 { "shst", 0xa }, { "SHST", 0xa },
16443 { "nsh", 0x7 }, { "NSH", 0x7 },
16444 { "nshst", 0x6 }, { "NSHST", 0x6 },
16445 { "osh", 0x3 }, { "OSH", 0x3 },
16446 { "oshst", 0x2 }, { "OSHST", 0x2 }
/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

/* Expand one entry per condition code (plus the bare form).  */
#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_ (m1,   , m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, le, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
/* ARM-only odd-infix mnemonic: m2 is the condition inserted between
   m1 and m3; the ternary selects the unconditional tag when m2 is
   empty.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand one entry per condition code (plus the bare form).  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional mnemonics; UF bears 0xF in the condition field.  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
16651 static const struct asm_opcode insns
[] =
16653 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
16654 #define THUMB_VARIANT &arm_ext_v4t
16655 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16656 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16657 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16658 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16659 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
16660 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
16661 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
16662 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
16663 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16664 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16665 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16666 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16667 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16668 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16669 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16670 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16672 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
16673 for setting PSR flag bits. They are obsolete in V6 and do not
16674 have Thumb equivalents. */
16675 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16676 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16677 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
16678 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
16679 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
16680 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
16681 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16682 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16683 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
16685 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
16686 tC3("movs", 1b00000
, _movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
16687 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
16688 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
16690 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
16691 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
16692 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
16694 OP_ADDRGLDR
),ldst
, t_ldst
),
16695 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
16697 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16698 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16699 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16700 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16701 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16702 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16704 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
16705 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
16706 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
16707 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
16710 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
16711 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
16712 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
16714 /* Thumb-compatibility pseudo ops. */
16715 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16716 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16717 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16718 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16719 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16720 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16721 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16722 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16723 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
16724 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
16725 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
16726 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
16728 /* These may simplify to neg. */
16729 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
16730 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
16732 #undef THUMB_VARIANT
16733 #define THUMB_VARIANT & arm_ext_v6
16735 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
16737 /* V1 instructions with no Thumb analogue prior to V6T2. */
16738 #undef THUMB_VARIANT
16739 #define THUMB_VARIANT & arm_ext_v6t2
16741 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16742 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16743 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
16745 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
16746 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
16747 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
16748 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
16750 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16751 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16753 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16754 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16756 /* V1 instructions with no Thumb analogue at all. */
16757 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
16758 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
16760 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
16761 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
16762 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
16763 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
16764 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
16765 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
16766 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
16767 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
16770 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
16771 #undef THUMB_VARIANT
16772 #define THUMB_VARIANT & arm_ext_v4t
16774 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
16775 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
16777 #undef THUMB_VARIANT
16778 #define THUMB_VARIANT & arm_ext_v6t2
16780 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
16781 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
16783 /* Generic coprocessor instructions. */
16784 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
16785 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16786 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16787 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16788 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16789 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
16790 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
16793 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
16795 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
16796 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
16799 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
16800 #undef THUMB_VARIANT
16801 #define THUMB_VARIANT & arm_ext_msr
16803 TCE("mrs", 10f0000
, f3ef8000
, 2, (APSR_RR
, RVC_PSR
), mrs
, t_mrs
),
16804 TCE("msr", 120f000
, f3808000
, 2, (RVC_PSR
, RR_EXi
), msr
, t_msr
),
16807 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
16808 #undef THUMB_VARIANT
16809 #define THUMB_VARIANT & arm_ext_v6t2
16811 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
16812 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
16813 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
16814 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
16815 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
16816 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
16817 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
16818 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
16821 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
16822 #undef THUMB_VARIANT
16823 #define THUMB_VARIANT & arm_ext_v4t
16825 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16826 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16827 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16828 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16829 tCM("ld","sh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16830 tCM("ld","sb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16833 #define ARM_VARIANT & arm_ext_v4t_5
16835 /* ARM Architecture 4T. */
16836 /* Note: bx (and blx) are required on V5, even if the processor does
16837 not support Thumb. */
16838 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
16841 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
16842 #undef THUMB_VARIANT
16843 #define THUMB_VARIANT & arm_ext_v5t
16845 /* Note: blx has 2 variants; the .value coded here is for
16846 BLX(2). Only this variant has conditional execution. */
16847 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
16848 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
16850 #undef THUMB_VARIANT
16851 #define THUMB_VARIANT & arm_ext_v6t2
16853 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
16854 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16855 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16856 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16857 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16858 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
16859 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
16860 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
16863 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
16864 #undef THUMB_VARIANT
16865 #define THUMB_VARIANT &arm_ext_v5exp
16867 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16868 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16869 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16870 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16872 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16873 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16875 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
16876 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
16877 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
16878 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
16880 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16881 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16882 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16883 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16885 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16886 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16888 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
16889 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
16890 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
16891 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
16894 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
16895 #undef THUMB_VARIANT
16896 #define THUMB_VARIANT &arm_ext_v6t2
16898 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
16899 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
16901 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
16902 ADDRGLDRS
), ldrd
, t_ldstd
),
16904 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
16905 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
16908 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
16910 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
16913 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
16914 #undef THUMB_VARIANT
16915 #define THUMB_VARIANT & arm_ext_v6
16917 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
16918 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
16919 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
16920 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
16921 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
16922 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
16923 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
16924 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
16925 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
16926 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
16928 #undef THUMB_VARIANT
16929 #define THUMB_VARIANT & arm_ext_v6t2
16931 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
16932 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
16934 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
16935 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
16937 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
16938 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
16940 /* ARM V6 not included in V7M. */
16941 #undef THUMB_VARIANT
16942 #define THUMB_VARIANT & arm_ext_v6_notm
16943 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
16944 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
16945 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
16946 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
16947 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
16948 UF(rfefa
, 9900a00
, 1, (RRw
), rfe
),
16949 UF(rfeea
, 8100a00
, 1, (RRw
), rfe
),
16950 TUF("rfeed", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
16951 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
16952 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
16953 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
16954 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
16956 /* ARM V6 not included in V7M (eg. integer SIMD). */
16957 #undef THUMB_VARIANT
16958 #define THUMB_VARIANT & arm_ext_v6_dsp
16959 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
16960 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
16961 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
16962 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16963 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16964 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16965 /* Old name for QASX. */
16966 TCE("qaddsubx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16967 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16968 /* Old name for QSAX. */
16969 TCE("qsubaddx", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16970 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16971 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16972 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16973 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16974 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16975 /* Old name for SASX. */
16976 TCE("saddsubx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16977 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16978 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16979 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16980 /* Old name for SHASX. */
16981 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16982 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16983 /* Old name for SHSAX. */
16984 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16985 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16986 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16987 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16988 /* Old name for SSAX. */
16989 TCE("ssubaddx", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16990 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16991 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16992 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16993 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16994 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16995 /* Old name for UASX. */
16996 TCE("uaddsubx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16997 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16998 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16999 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17000 /* Old name for UHASX. */
17001 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17002 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17003 /* Old name for UHSAX. */
17004 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17005 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17006 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17007 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17008 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17009 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17010 /* Old name for UQASX. */
17011 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17012 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17013 /* Old name for UQSAX. */
17014 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17015 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17016 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17017 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17018 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17019 /* Old name for USAX. */
17020 TCE("usubaddx", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17021 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17022 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
17023 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
17024 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
17025 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
17026 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
17027 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
17028 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
17029 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
17030 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
17031 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
17032 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
17033 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
17034 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
17035 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
17036 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
17037 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
17038 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
17039 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
17040 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
17041 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
17042 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
17043 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
17044 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
17045 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
17046 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
17047 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
17048 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
17049 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
17050 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
17051 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
17052 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
17053 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
17056 #define ARM_VARIANT & arm_ext_v6k
17057 #undef THUMB_VARIANT
17058 #define THUMB_VARIANT & arm_ext_v6k
17060 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
17061 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
17062 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
17063 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
17065 #undef THUMB_VARIANT
17066 #define THUMB_VARIANT & arm_ext_v6_notm
17067 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
17069 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
17070 RRnpcb
), strexd
, t_strexd
),
17072 #undef THUMB_VARIANT
17073 #define THUMB_VARIANT & arm_ext_v6t2
17074 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
17076 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
17078 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
17080 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
17082 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
17085 #define ARM_VARIANT & arm_ext_sec
17086 #undef THUMB_VARIANT
17087 #define THUMB_VARIANT & arm_ext_sec
17089 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
17092 #define ARM_VARIANT & arm_ext_v6t2
17093 #undef THUMB_VARIANT
17094 #define THUMB_VARIANT & arm_ext_v6t2
17096 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
17097 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
17098 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
17099 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
17101 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
17102 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
17103 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
17104 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
17106 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
17107 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
17108 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
17109 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
17111 /* Thumb-only instructions. */
17113 #define ARM_VARIANT NULL
17114 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
17115 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
17117 /* ARM does not really have an IT instruction, so always allow it.
17118 The opcode is copied from Thumb in order to allow warnings in
17119 -mimplicit-it=[never | arm] modes. */
17121 #define ARM_VARIANT & arm_ext_v1
17123 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
17124 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
17125 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
17126 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
17127 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
17128 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
17129 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
17130 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
17131 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
17132 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
17133 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
17134 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
17135 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
17136 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
17137 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
17138 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
17139 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
17140 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
17142 /* Thumb2 only instructions. */
17144 #define ARM_VARIANT NULL
17146 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
17147 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
17148 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
17149 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
17150 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
17151 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
17153 /* Hardware division instructions. */
17155 #define ARM_VARIANT & arm_ext_adiv
17156 #undef THUMB_VARIANT
17157 #define THUMB_VARIANT & arm_ext_div
17159 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
17160 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
17162 /* ARM V6M/V7 instructions. */
17164 #define ARM_VARIANT & arm_ext_barrier
17165 #undef THUMB_VARIANT
17166 #define THUMB_VARIANT & arm_ext_barrier
17168 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, t_barrier
),
17169 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, t_barrier
),
17170 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, t_barrier
),
17172 /* ARM V7 instructions. */
17174 #define ARM_VARIANT & arm_ext_v7
17175 #undef THUMB_VARIANT
17176 #define THUMB_VARIANT & arm_ext_v7
17178 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
17179 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
17182 #define ARM_VARIANT & arm_ext_mp
17183 #undef THUMB_VARIANT
17184 #define THUMB_VARIANT & arm_ext_mp
17186 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
17189 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
17191 cCE("wfs", e200110
, 1, (RR
), rd
),
17192 cCE("rfs", e300110
, 1, (RR
), rd
),
17193 cCE("wfc", e400110
, 1, (RR
), rd
),
17194 cCE("rfc", e500110
, 1, (RR
), rd
),
17196 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17197 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17198 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17199 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17201 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17202 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17203 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17204 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17206 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
17207 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
17208 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
17209 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
17210 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
17211 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
17212 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
17213 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
17214 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
17215 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
17216 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
17217 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
17219 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
17220 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
17221 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
17222 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
17223 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
17224 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
17225 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
17226 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
17227 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
17228 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
17229 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
17230 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
17232 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
17233 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
17234 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
17235 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
17236 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
17237 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
17238 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
17239 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
17240 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
17241 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
17242 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
17243 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
17245 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
17246 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
17247 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
17248 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
17249 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
17250 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
17251 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
17252 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
17253 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
17254 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
17255 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
17256 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
17258 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
17259 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
17260 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
17261 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
17262 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
17263 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
17264 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
17265 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
17266 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
17267 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
17268 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
17269 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
17271 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
17272 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
17273 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
17274 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
17275 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
17276 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
17277 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
17278 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
17279 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
17280 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
17281 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
17282 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
17284 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
17285 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
17286 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
17287 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
17288 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
17289 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
17290 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
17291 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
17292 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
17293 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
17294 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
17295 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
17297 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
17298 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
17299 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
17300 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
17301 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
17302 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
17303 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
17304 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
17305 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
17306 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
17307 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
17308 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
17310 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
17311 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
17312 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
17313 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
17314 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
17315 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
17316 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
17317 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
17318 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
17319 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
17320 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
17321 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
17323 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
17324 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
17325 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
17326 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
17327 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
17328 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
17329 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
17330 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
17331 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
17332 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
17333 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
17334 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
17336 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
17337 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
17338 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
17339 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
17340 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
17341 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
17342 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
17343 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
17344 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
17345 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
17346 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
17347 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
17349 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
17350 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
17351 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
17352 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
17353 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
17354 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
17355 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
17356 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
17357 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
17358 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
17359 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
17360 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
17362 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
17363 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
17364 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
17365 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
17366 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
17367 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
17368 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
17369 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
17370 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
17371 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
17372 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
17373 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
17375 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
17376 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
17377 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
17378 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
17379 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
17380 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
17381 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
17382 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
17383 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
17384 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
17385 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
17386 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
17388 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
17389 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
17390 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
17391 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
17392 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
17393 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
17394 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
17395 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
17396 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
17397 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
17398 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
17399 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
17401 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
17402 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
17403 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
17404 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
17405 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
17406 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
17407 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
17408 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
17409 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
17410 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
17411 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
17412 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
17414 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17415 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17416 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17417 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17418 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17419 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17420 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17421 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17422 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17423 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17424 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17425 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17427 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17428 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17429 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17430 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17431 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17432 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17433 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17434 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17435 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17436 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17437 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17438 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17440 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17441 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17442 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17443 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17444 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17445 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17446 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17447 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17448 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17449 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17450 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17451 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17453 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17454 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17455 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17456 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17457 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17458 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17459 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17460 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17461 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17462 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17463 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17464 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17466 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17467 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17468 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17469 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17470 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17471 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17472 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17473 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17474 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17475 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17476 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17477 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17479 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17480 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17481 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17482 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17483 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17484 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17485 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17486 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17487 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17488 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17489 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17490 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17492 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17493 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17494 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17495 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17496 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17497 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17498 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17499 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17500 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17501 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17502 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17503 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17505 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17506 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17507 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17508 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17509 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17510 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17511 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17512 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17513 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17514 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17515 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17516 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17518 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17519 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17520 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17521 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17522 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17523 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17524 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17525 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17526 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17527 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17528 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17529 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17531 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17532 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17533 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17534 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17535 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17536 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17537 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17538 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17539 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17540 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17541 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17542 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17544 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17545 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17546 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17547 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17548 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17549 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17550 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17551 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17552 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17553 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17554 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17555 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17557 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17558 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17559 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17560 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17561 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17562 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17563 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17564 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17565 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17566 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17567 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17568 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17570 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17571 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17572 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17573 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17574 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17575 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17576 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17577 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17578 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17579 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17580 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17581 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17583 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17584 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17585 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17586 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17588 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
17589 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
17590 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
17591 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
17592 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
17593 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
17594 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
17595 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
17596 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
17597 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
17598 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
17599 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
17601 /* The implementation of the FIX instruction is broken on some
17602 assemblers, in that it accepts a precision specifier as well as a
17603 rounding specifier, despite the fact that this is meaningless.
17604 To be more compatible, we accept it as well, though of course it
17605 does not set any bits. */
17606 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
17607 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
17608 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
17609 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
17610 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
17611 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
17612 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
17613 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
17614 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
17615 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
17616 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
17617 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
17618 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
17620 /* Instructions that were new with the real FPA, call them V2. */
17622 #define ARM_VARIANT & fpu_fpa_ext_v2
17624 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17625 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17626 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17627 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17628 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17629 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17632 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
17634 /* Moves and type conversions. */
17635 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17636 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
17637 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
17638 cCE("fmstat", ef1fa10
, 0, (), noargs
),
17639 cCE("vmrs", ef10a10
, 2, (APSR_RR
, RVC
), vmrs
),
17640 cCE("vmsr", ee10a10
, 2, (RVC
, RR
), vmsr
),
17641 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17642 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17643 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17644 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17645 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17646 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17647 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
17648 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
17650 /* Memory operations. */
17651 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
17652 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
17653 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
17654 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
17655 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
17656 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
17657 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
17658 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
17659 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
17660 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
17661 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
17662 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
17663 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
17664 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
17665 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
17666 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
17667 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
17668 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
17670 /* Monadic operations. */
17671 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17672 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17673 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17675 /* Dyadic operations. */
17676 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17677 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17678 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17679 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17680 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17681 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17682 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17683 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17684 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17687 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17688 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
17689 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17690 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
17692 /* Double precision load/store are still present on single precision
17693 implementations. */
17694 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
17695 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
17696 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
17697 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
17698 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
17699 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
17700 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
17701 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
17702 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
17703 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
17706 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
17708 /* Moves and type conversions. */
17709 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17710 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
17711 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17712 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
17713 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
17714 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
17715 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
17716 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
17717 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
17718 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17719 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17720 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17721 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17723 /* Monadic operations. */
17724 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17725 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17726 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17728 /* Dyadic operations. */
17729 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17730 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17731 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17732 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17733 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17734 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17735 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17736 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17737 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17740 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17741 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
17742 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17743 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
17746 #define ARM_VARIANT & fpu_vfp_ext_v2
17748 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
17749 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
17750 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
17751 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
17753 /* Instructions which may belong to either the Neon or VFP instruction sets.
17754 Individual encoder functions perform additional architecture checks. */
17756 #define ARM_VARIANT & fpu_vfp_ext_v1xd
17757 #undef THUMB_VARIANT
17758 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
17760 /* These mnemonics are unique to VFP. */
17761 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
17762 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
17763 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
17764 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
17765 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
17766 nCE(vcmp
, _vcmp
, 2, (RVSD
, RVSD_I0
), vfp_nsyn_cmp
),
17767 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RVSD_I0
), vfp_nsyn_cmp
),
17768 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
17769 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
17770 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
17772 /* Mnemonics shared by Neon and VFP. */
17773 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
17774 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
17775 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
17777 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
17778 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
17780 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
17781 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
17783 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17784 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17785 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17786 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17787 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17788 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17789 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
17790 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
17792 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32b
), neon_cvt
),
17793 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
17794 nCEF(vcvtb
, _vcvt
, 2, (RVS
, RVS
), neon_cvtb
),
17795 nCEF(vcvtt
, _vcvt
, 2, (RVS
, RVS
), neon_cvtt
),
17798 /* NOTE: All VMOV encoding is special-cased! */
17799 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
17800 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
17802 #undef THUMB_VARIANT
17803 #define THUMB_VARIANT & fpu_neon_ext_v1
17805 #define ARM_VARIANT & fpu_neon_ext_v1
17807 /* Data processing with three registers of the same length. */
17808 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
17809 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
17810 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
17811 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17812 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17813 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17814 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17815 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17816 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17817 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
17818 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
17819 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
17820 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
17821 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
17822 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
17823 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
17824 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
17825 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
17826 /* If not immediate, fall back to neon_dyadic_i64_su.
17827 shl_imm should accept I8 I16 I32 I64,
17828 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
17829 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
17830 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
17831 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
17832 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
17833 /* Logic ops, types optional & ignored. */
17834 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17835 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17836 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17837 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17838 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17839 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17840 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17841 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17842 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
17843 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
17844 /* Bitfield ops, untyped. */
17845 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17846 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17847 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17848 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17849 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17850 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17851 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
17852 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17853 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17854 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17855 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17856 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17857 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17858 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
17859 back to neon_dyadic_if_su. */
17860 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
17861 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
17862 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
17863 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
17864 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
17865 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
17866 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
17867 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
17868 /* Comparison. Type I8 I16 I32 F32. */
17869 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
17870 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
17871 /* As above, D registers only. */
17872 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
17873 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
17874 /* Int and float variants, signedness unimportant. */
17875 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
17876 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
17877 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
17878 /* Add/sub take types I8 I16 I32 I64 F32. */
17879 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
17880 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
17881 /* vtst takes sizes 8, 16, 32. */
17882 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
17883 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
17884 /* VMUL takes I8 I16 I32 F32 P8. */
17885 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
17886 /* VQD{R}MULH takes S16 S32. */
17887 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
17888 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
17889 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
17890 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
17891 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
17892 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
17893 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
17894 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
17895 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
17896 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
17897 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
17898 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
17899 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
17900 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
17901 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
17902 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
17904 /* Two address, int/float. Types S8 S16 S32 F32. */
17905 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
17906 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
17908 /* Data processing with two registers and a shift amount. */
17909 /* Right shifts, and variants with rounding.
17910 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
17911 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
17912 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
17913 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
17914 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
17915 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
17916 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
17917 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
17918 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
17919 /* Shift and insert. Sizes accepted 8 16 32 64. */
17920 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
17921 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
17922 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
17923 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
17924 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
17925 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
17926 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
17927 /* Right shift immediate, saturating & narrowing, with rounding variants.
17928 Types accepted S16 S32 S64 U16 U32 U64. */
17929 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
17930 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
17931 /* As above, unsigned. Types accepted S16 S32 S64. */
17932 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
17933 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
17934 /* Right shift narrowing. Types accepted I16 I32 I64. */
17935 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
17936 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
17937 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
17938 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
17939 /* CVT with optional immediate for fixed-point variant. */
17940 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
17942 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
17943 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
17945 /* Data processing, three registers of different lengths. */
17946 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
17947 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
17948 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
17949 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
17950 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
17951 /* If not scalar, fall back to neon_dyadic_long.
17952 Vector types as above, scalar types S16 S32 U16 U32. */
17953 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
17954 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
17955 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
17956 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
17957 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
17958 /* Dyadic, narrowing insns. Types I16 I32 I64. */
17959 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17960 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17961 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17962 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17963 /* Saturating doubling multiplies. Types S16 S32. */
17964 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
17965 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
17966 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
17967 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
17968 S16 S32 U16 U32. */
17969 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
17971 /* Extract. Size 8. */
17972 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
17973 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
17975 /* Two registers, miscellaneous. */
17976 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
17977 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
17978 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
17979 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
17980 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
17981 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
17982 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
17983 /* Vector replicate. Sizes 8 16 32. */
17984 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
17985 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
17986 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
17987 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
17988 /* VMOVN. Types I16 I32 I64. */
17989 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
17990 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
17991 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
17992 /* VQMOVUN. Types S16 S32 S64. */
17993 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
17994 /* VZIP / VUZP. Sizes 8 16 32. */
17995 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
17996 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
17997 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
17998 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
17999 /* VQABS / VQNEG. Types S8 S16 S32. */
18000 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
18001 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
18002 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
18003 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
18004 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
18005 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
18006 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
18007 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
18008 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
18009 /* Reciprocal estimates. Types U32 F32. */
18010 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
18011 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
18012 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
18013 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
18014 /* VCLS. Types S8 S16 S32. */
18015 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
18016 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
18017 /* VCLZ. Types I8 I16 I32. */
18018 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
18019 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
18020 /* VCNT. Size 8. */
18021 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
18022 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
18023 /* Two address, untyped. */
18024 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
18025 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
18026 /* VTRN. Sizes 8 16 32. */
18027 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
18028 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
18030 /* Table lookup. Size 8. */
18031 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
18032 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
18034 #undef THUMB_VARIANT
18035 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
18037 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
18039 /* Neon element/structure load/store. */
18040 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
18041 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
18042 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
18043 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
18044 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
18045 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
18046 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
18047 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
18049 #undef THUMB_VARIANT
18050 #define THUMB_VARIANT &fpu_vfp_ext_v3xd
18052 #define ARM_VARIANT &fpu_vfp_ext_v3xd
18053 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
18054 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
18055 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
18056 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
18057 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
18058 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
18059 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
18060 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
18061 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
18063 #undef THUMB_VARIANT
18064 #define THUMB_VARIANT & fpu_vfp_ext_v3
18066 #define ARM_VARIANT & fpu_vfp_ext_v3
18068 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
18069 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
18070 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
18071 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
18072 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
18073 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
18074 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
18075 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
18076 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
18079 #define ARM_VARIANT &fpu_vfp_ext_fma
18080 #undef THUMB_VARIANT
18081 #define THUMB_VARIANT &fpu_vfp_ext_fma
18082 /* Mnemonics shared by Neon and VFP. These are included in the
18083 VFP FMA variant; NEON and VFP FMA always includes the NEON
18084 FMA instructions. */
18085 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
18086 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
18087 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
18088 the v form should always be used. */
18089 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
18090 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
18091 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
18092 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
18093 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
18094 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
18096 #undef THUMB_VARIANT
18098 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
18100 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18101 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18102 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18103 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18104 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18105 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18106 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
18107 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
18110 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
18112 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
18113 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
18114 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
18115 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
18116 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
18117 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
18118 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
18119 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
18120 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
18121 cCE("textrmub", e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18122 cCE("textrmuh", e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18123 cCE("textrmuw", e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18124 cCE("textrmsb", e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18125 cCE("textrmsh", e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18126 cCE("textrmsw", e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18127 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
18128 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
18129 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
18130 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
18131 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
18132 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18133 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18134 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18135 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18136 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18137 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18138 cCE("tmovmskb", e100030
, 2, (RR
, RIWR
), rd_rn
),
18139 cCE("tmovmskh", e500030
, 2, (RR
, RIWR
), rd_rn
),
18140 cCE("tmovmskw", e900030
, 2, (RR
, RIWR
), rd_rn
),
18141 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
18142 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
18143 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
18144 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
18145 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
18146 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18147 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18148 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18149 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18150 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18151 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18152 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18153 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18154 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18155 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18156 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18157 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18158 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
18159 cCE("walignr0", e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18160 cCE("walignr1", e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18161 cCE("walignr2", ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18162 cCE("walignr3", eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18163 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18164 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18165 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18166 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18167 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18168 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18169 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18170 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18171 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18172 cCE("wcmpgtub", e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18173 cCE("wcmpgtuh", e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18174 cCE("wcmpgtuw", e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18175 cCE("wcmpgtsb", e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18176 cCE("wcmpgtsh", e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18177 cCE("wcmpgtsw", eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18178 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
18179 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
18180 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
18181 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
18182 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18183 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18184 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18185 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18186 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18187 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18188 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18189 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18190 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18191 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18192 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18193 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18194 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18195 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18196 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18197 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18198 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18199 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18200 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
18201 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18202 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18203 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18204 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18205 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18206 cCE("wpackhss", e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18207 cCE("wpackhus", e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18208 cCE("wpackwss", eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18209 cCE("wpackwus", e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18210 cCE("wpackdss", ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18211 cCE("wpackdus", ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18212 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18213 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18214 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18215 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18216 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18217 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18218 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18219 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18220 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18221 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18222 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
18223 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18224 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18225 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18226 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18227 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18228 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18229 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18230 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18231 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18232 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18233 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18234 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18235 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18236 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18237 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18238 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18239 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18240 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18241 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
18242 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
18243 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
18244 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
18245 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18246 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18247 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18248 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18249 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18250 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18251 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18252 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18253 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18254 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18255 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18256 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18257 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18258 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18259 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18260 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18261 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18262 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18263 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18264 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18265 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18266 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18267 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18268 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18269 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18270 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18271 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18272 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18273 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
18276 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
18278 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
18279 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
18280 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
18281 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18282 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18283 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18284 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18285 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18286 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18287 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18288 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18289 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18290 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18291 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18292 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18293 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18294 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18295 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18296 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18297 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18298 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
18299 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18300 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18301 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18302 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18303 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18304 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18305 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18306 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18307 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18308 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18309 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18310 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18311 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18312 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18313 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18314 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18315 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18316 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18317 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18318 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18319 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18320 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18321 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18322 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18323 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18324 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18325 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18326 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18327 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18328 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18329 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18330 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18331 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18332 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18333 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18334 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18337 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
18339 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
18340 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
18341 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
18342 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
18343 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
18344 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
18345 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
18346 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
18347 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
18348 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
18349 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
18350 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
18351 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
18352 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
18353 cCE("cfmv64lr", e000510
, 2, (RMDX
, RR
), rn_rd
),
18354 cCE("cfmvr64l", e100510
, 2, (RR
, RMDX
), rd_rn
),
18355 cCE("cfmv64hr", e000530
, 2, (RMDX
, RR
), rn_rd
),
18356 cCE("cfmvr64h", e100530
, 2, (RR
, RMDX
), rd_rn
),
18357 cCE("cfmval32", e200440
, 2, (RMAX
, RMFX
), rd_rn
),
18358 cCE("cfmv32al", e100440
, 2, (RMFX
, RMAX
), rd_rn
),
18359 cCE("cfmvam32", e200460
, 2, (RMAX
, RMFX
), rd_rn
),
18360 cCE("cfmv32am", e100460
, 2, (RMFX
, RMAX
), rd_rn
),
18361 cCE("cfmvah32", e200480
, 2, (RMAX
, RMFX
), rd_rn
),
18362 cCE("cfmv32ah", e100480
, 2, (RMFX
, RMAX
), rd_rn
),
18363 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
18364 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
18365 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
18366 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
18367 cCE("cfmvsc32", e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
18368 cCE("cfmv32sc", e1004e0
, 2, (RMDX
, RMDS
), rd
),
18369 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
18370 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
18371 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
18372 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
18373 cCE("cfcvt32s", e000480
, 2, (RMF
, RMFX
), rd_rn
),
18374 cCE("cfcvt32d", e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
18375 cCE("cfcvt64s", e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
18376 cCE("cfcvt64d", e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
18377 cCE("cfcvts32", e100580
, 2, (RMFX
, RMF
), rd_rn
),
18378 cCE("cfcvtd32", e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
18379 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
18380 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
18381 cCE("cfrshl32", e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
18382 cCE("cfrshl64", e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
18383 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
18384 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
18385 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
18386 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
18387 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
18388 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
18389 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
18390 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
18391 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
18392 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
18393 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
18394 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
18395 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
18396 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
18397 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
18398 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
18399 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
18400 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
18401 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
18402 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
18403 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18404 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
18405 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18406 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
18407 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18408 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
18409 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18410 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18411 cCE("cfmadd32", e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
18412 cCE("cfmsub32", e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
18413 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
18414 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
18417 #undef THUMB_VARIANT
/* MD interface: bits in the object file.  */
18446 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
18447 for use in the a.out file, and stores them in the array pointed to by buf.
18448 This knows about the endian-ness of the target machine and does
18449 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
18450 2 (short) and 4 (long) Floating numbers are put out as a series of
18451 LITTLENUMS (shorts, here at least). */
18454 md_number_to_chars (char * buf
, valueT val
, int n
)
18456 if (target_big_endian
)
18457 number_to_chars_bigendian (buf
, val
, n
);
18459 number_to_chars_littleendian (buf
, val
, n
);
18463 md_chars_to_number (char * buf
, int n
)
18466 unsigned char * where
= (unsigned char *) buf
;
18468 if (target_big_endian
)
18473 result
|= (*where
++ & 255);
18481 result
|= (where
[n
] & 255);
18488 /* MD interface: Sections. */
18490 /* Estimate the size of a frag before relaxing. Assume everything fits in
18494 md_estimate_size_before_relax (fragS
* fragp
,
18495 segT segtype ATTRIBUTE_UNUSED
)
18501 /* Convert a machine dependent frag. */
18504 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
18506 unsigned long insn
;
18507 unsigned long old_op
;
18515 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
18517 old_op
= bfd_get_16(abfd
, buf
);
18518 if (fragp
->fr_symbol
)
18520 exp
.X_op
= O_symbol
;
18521 exp
.X_add_symbol
= fragp
->fr_symbol
;
18525 exp
.X_op
= O_constant
;
18527 exp
.X_add_number
= fragp
->fr_offset
;
18528 opcode
= fragp
->fr_subtype
;
18531 case T_MNEM_ldr_pc
:
18532 case T_MNEM_ldr_pc2
:
18533 case T_MNEM_ldr_sp
:
18534 case T_MNEM_str_sp
:
18541 if (fragp
->fr_var
== 4)
18543 insn
= THUMB_OP32 (opcode
);
18544 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
18546 insn
|= (old_op
& 0x700) << 4;
18550 insn
|= (old_op
& 7) << 12;
18551 insn
|= (old_op
& 0x38) << 13;
18553 insn
|= 0x00000c00;
18554 put_thumb32_insn (buf
, insn
);
18555 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
18559 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
18561 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
18564 if (fragp
->fr_var
== 4)
18566 insn
= THUMB_OP32 (opcode
);
18567 insn
|= (old_op
& 0xf0) << 4;
18568 put_thumb32_insn (buf
, insn
);
18569 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
18573 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
18574 exp
.X_add_number
-= 4;
18582 if (fragp
->fr_var
== 4)
18584 int r0off
= (opcode
== T_MNEM_mov
18585 || opcode
== T_MNEM_movs
) ? 0 : 8;
18586 insn
= THUMB_OP32 (opcode
);
18587 insn
= (insn
& 0xe1ffffff) | 0x10000000;
18588 insn
|= (old_op
& 0x700) << r0off
;
18589 put_thumb32_insn (buf
, insn
);
18590 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
18594 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
18599 if (fragp
->fr_var
== 4)
18601 insn
= THUMB_OP32(opcode
);
18602 put_thumb32_insn (buf
, insn
);
18603 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
18606 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
18610 if (fragp
->fr_var
== 4)
18612 insn
= THUMB_OP32(opcode
);
18613 insn
|= (old_op
& 0xf00) << 14;
18614 put_thumb32_insn (buf
, insn
);
18615 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
18618 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
18621 case T_MNEM_add_sp
:
18622 case T_MNEM_add_pc
:
18623 case T_MNEM_inc_sp
:
18624 case T_MNEM_dec_sp
:
18625 if (fragp
->fr_var
== 4)
18627 /* ??? Choose between add and addw. */
18628 insn
= THUMB_OP32 (opcode
);
18629 insn
|= (old_op
& 0xf0) << 4;
18630 put_thumb32_insn (buf
, insn
);
18631 if (opcode
== T_MNEM_add_pc
)
18632 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
18634 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
18637 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
18645 if (fragp
->fr_var
== 4)
18647 insn
= THUMB_OP32 (opcode
);
18648 insn
|= (old_op
& 0xf0) << 4;
18649 insn
|= (old_op
& 0xf) << 16;
18650 put_thumb32_insn (buf
, insn
);
18651 if (insn
& (1 << 20))
18652 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
18654 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
18657 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
18663 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
18664 (enum bfd_reloc_code_real
) reloc_type
);
18665 fixp
->fx_file
= fragp
->fr_file
;
18666 fixp
->fx_line
= fragp
->fr_line
;
18667 fragp
->fr_fix
+= fragp
->fr_var
;
18670 /* Return the size of a relaxable immediate operand instruction.
18671 SHIFT and SIZE specify the form of the allowable immediate. */
18673 relax_immediate (fragS
*fragp
, int size
, int shift
)
18679 /* ??? Should be able to do better than this. */
18680 if (fragp
->fr_symbol
)
18683 low
= (1 << shift
) - 1;
18684 mask
= (1 << (shift
+ size
)) - (1 << shift
);
18685 offset
= fragp
->fr_offset
;
18686 /* Force misaligned offsets to 32-bit variant. */
18689 if (offset
& ~mask
)
18694 /* Get the address of a symbol during relaxation. */
18696 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
18702 sym
= fragp
->fr_symbol
;
18703 sym_frag
= symbol_get_frag (sym
);
18704 know (S_GET_SEGMENT (sym
) != absolute_section
18705 || sym_frag
== &zero_address_frag
);
18706 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
18708 /* If frag has yet to be reached on this pass, assume it will
18709 move by STRETCH just as we did. If this is not so, it will
18710 be because some frag between grows, and that will force
18714 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
18718 /* Adjust stretch for any alignment frag. Note that if have
18719 been expanding the earlier code, the symbol may be
18720 defined in what appears to be an earlier frag. FIXME:
18721 This doesn't handle the fr_subtype field, which specifies
18722 a maximum number of bytes to skip when doing an
18724 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
18726 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
18729 stretch
= - ((- stretch
)
18730 & ~ ((1 << (int) f
->fr_offset
) - 1));
18732 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
18744 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
18747 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
18752 /* Assume worst case for symbols not known to be in the same section. */
18753 if (fragp
->fr_symbol
== NULL
18754 || !S_IS_DEFINED (fragp
->fr_symbol
)
18755 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
18756 || S_IS_WEAK (fragp
->fr_symbol
))
18759 val
= relaxed_symbol_addr (fragp
, stretch
);
18760 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
18761 addr
= (addr
+ 4) & ~3;
18762 /* Force misaligned targets to 32-bit variant. */
18766 if (val
< 0 || val
> 1020)
18771 /* Return the size of a relaxable add/sub immediate instruction. */
18773 relax_addsub (fragS
*fragp
, asection
*sec
)
18778 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
18779 op
= bfd_get_16(sec
->owner
, buf
);
18780 if ((op
& 0xf) == ((op
>> 4) & 0xf))
18781 return relax_immediate (fragp
, 8, 0);
18783 return relax_immediate (fragp
, 3, 0);
18787 /* Return the size of a relaxable branch instruction. BITS is the
18788 size of the offset field in the narrow instruction. */
18791 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
18797 /* Assume worst case for symbols not known to be in the same section. */
18798 if (!S_IS_DEFINED (fragp
->fr_symbol
)
18799 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
18800 || S_IS_WEAK (fragp
->fr_symbol
))
18804 if (S_IS_DEFINED (fragp
->fr_symbol
)
18805 && ARM_IS_FUNC (fragp
->fr_symbol
))
18809 val
= relaxed_symbol_addr (fragp
, stretch
);
18810 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
18813 /* Offset is a signed value *2 */
18815 if (val
>= limit
|| val
< -limit
)
18821 /* Relax a machine dependent frag. This returns the amount by which
18822 the current size of the frag should change. */
18825 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
18830 oldsize
= fragp
->fr_var
;
18831 switch (fragp
->fr_subtype
)
18833 case T_MNEM_ldr_pc2
:
18834 newsize
= relax_adr (fragp
, sec
, stretch
);
18836 case T_MNEM_ldr_pc
:
18837 case T_MNEM_ldr_sp
:
18838 case T_MNEM_str_sp
:
18839 newsize
= relax_immediate (fragp
, 8, 2);
18843 newsize
= relax_immediate (fragp
, 5, 2);
18847 newsize
= relax_immediate (fragp
, 5, 1);
18851 newsize
= relax_immediate (fragp
, 5, 0);
18854 newsize
= relax_adr (fragp
, sec
, stretch
);
18860 newsize
= relax_immediate (fragp
, 8, 0);
18863 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
18866 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
18868 case T_MNEM_add_sp
:
18869 case T_MNEM_add_pc
:
18870 newsize
= relax_immediate (fragp
, 8, 2);
18872 case T_MNEM_inc_sp
:
18873 case T_MNEM_dec_sp
:
18874 newsize
= relax_immediate (fragp
, 7, 2);
18880 newsize
= relax_addsub (fragp
, sec
);
18886 fragp
->fr_var
= newsize
;
18887 /* Freeze wide instructions that are at or before the same location as
18888 in the previous pass. This avoids infinite loops.
18889 Don't freeze them unconditionally because targets may be artificially
18890 misaligned by the expansion of preceding frags. */
18891 if (stretch
<= 0 && newsize
> 2)
18893 md_convert_frag (sec
->owner
, sec
, fragp
);
18897 return newsize
- oldsize
;
18900 /* Round up a section size to the appropriate boundary. */
18903 md_section_align (segT segment ATTRIBUTE_UNUSED
,
18906 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
18907 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
18909 /* For a.out, force the section size to be aligned. If we don't do
18910 this, BFD will align it for us, but it will not write out the
18911 final bytes of the section. This may be a bug in BFD, but it is
18912 easier to fix it here since that is how the other a.out targets
18916 align
= bfd_get_section_alignment (stdoutput
, segment
);
18917 size
= ((size
+ (1 << align
) - 1) & ((valueT
) -1 << align
));
18924 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
18925 of an rs_align_code fragment. */
18928 arm_handle_align (fragS
* fragP
)
18930 static char const arm_noop
[2][2][4] =
18933 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
18934 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
18937 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
18938 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
18941 static char const thumb_noop
[2][2][2] =
18944 {0xc0, 0x46}, /* LE */
18945 {0x46, 0xc0}, /* BE */
18948 {0x00, 0xbf}, /* LE */
18949 {0xbf, 0x00} /* BE */
18952 static char const wide_thumb_noop
[2][4] =
18953 { /* Wide Thumb-2 */
18954 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
18955 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
18958 unsigned bytes
, fix
, noop_size
;
18961 const char *narrow_noop
= NULL
;
18966 if (fragP
->fr_type
!= rs_align_code
)
18969 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
18970 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
18973 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
18974 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
18976 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
18978 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
18980 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
18982 narrow_noop
= thumb_noop
[1][target_big_endian
];
18983 noop
= wide_thumb_noop
[target_big_endian
];
18986 noop
= thumb_noop
[0][target_big_endian
];
18994 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
) != 0]
18995 [target_big_endian
];
19002 fragP
->fr_var
= noop_size
;
19004 if (bytes
& (noop_size
- 1))
19006 fix
= bytes
& (noop_size
- 1);
19008 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
19010 memset (p
, 0, fix
);
19017 if (bytes
& noop_size
)
19019 /* Insert a narrow noop. */
19020 memcpy (p
, narrow_noop
, noop_size
);
19022 bytes
-= noop_size
;
19026 /* Use wide noops for the remainder */
19030 while (bytes
>= noop_size
)
19032 memcpy (p
, noop
, noop_size
);
19034 bytes
-= noop_size
;
19038 fragP
->fr_fix
+= fix
;
19041 /* Called from md_do_align. Used to create an alignment
19042 frag in a code section. */
19045 arm_frag_align_code (int n
, int max
)
19049 /* We assume that there will never be a requirement
19050 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
19051 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
19056 _("alignments greater than %d bytes not supported in .text sections."),
19057 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
19058 as_fatal ("%s", err_msg
);
19061 p
= frag_var (rs_align_code
,
19062 MAX_MEM_FOR_RS_ALIGN_CODE
,
19064 (relax_substateT
) max
,
19071 /* Perform target specific initialisation of a frag.
19072 Note - despite the name this initialisation is not done when the frag
19073 is created, but only when its type is assigned. A frag can be created
19074 and used a long time before its type is set, so beware of assuming that
19075 this initialisationis performed first. */
19079 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
19081 /* Record whether this frag is in an ARM or a THUMB area. */
19082 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
19085 #else /* OBJ_ELF is defined. */
19087 arm_init_frag (fragS
* fragP
, int max_chars
)
19089 /* If the current ARM vs THUMB mode has not already
19090 been recorded into this frag then do so now. */
19091 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
19093 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
19095 /* Record a mapping symbol for alignment frags. We will delete this
19096 later if the alignment ends up empty. */
19097 switch (fragP
->fr_type
)
19100 case rs_align_test
:
19102 mapping_state_2 (MAP_DATA
, max_chars
);
19104 case rs_align_code
:
19105 mapping_state_2 (thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
19113 /* When we change sections we need to issue a new mapping symbol. */
19116 arm_elf_change_section (void)
19118 /* Link an unlinked unwind index table section to the .text section. */
19119 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
19120 && elf_linked_to_section (now_seg
) == NULL
)
19121 elf_linked_to_section (now_seg
) = text_section
;
19125 arm_elf_section_type (const char * str
, size_t len
)
19127 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
19128 return SHT_ARM_EXIDX
;
19133 /* Code to deal with unwinding tables. */
19135 static void add_unwind_adjustsp (offsetT
);
19137 /* Generate any deferred unwind frame offset. */
19140 flush_pending_unwind (void)
19144 offset
= unwind
.pending_offset
;
19145 unwind
.pending_offset
= 0;
19147 add_unwind_adjustsp (offset
);
19150 /* Add an opcode to this list for this function. Two-byte opcodes should
19151 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
19155 add_unwind_opcode (valueT op
, int length
)
19157 /* Add any deferred stack adjustment. */
19158 if (unwind
.pending_offset
)
19159 flush_pending_unwind ();
19161 unwind
.sp_restored
= 0;
19163 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
19165 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
19166 if (unwind
.opcodes
)
19167 unwind
.opcodes
= (unsigned char *) xrealloc (unwind
.opcodes
,
19168 unwind
.opcode_alloc
);
19170 unwind
.opcodes
= (unsigned char *) xmalloc (unwind
.opcode_alloc
);
19175 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
19177 unwind
.opcode_count
++;
19181 /* Add unwind opcodes to adjust the stack pointer. */
19184 add_unwind_adjustsp (offsetT offset
)
19188 if (offset
> 0x200)
19190 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
19195 /* Long form: 0xb2, uleb128. */
19196 /* This might not fit in a word so add the individual bytes,
19197 remembering the list is built in reverse order. */
19198 o
= (valueT
) ((offset
- 0x204) >> 2);
19200 add_unwind_opcode (0, 1);
19202 /* Calculate the uleb128 encoding of the offset. */
19206 bytes
[n
] = o
& 0x7f;
19212 /* Add the insn. */
19214 add_unwind_opcode (bytes
[n
- 1], 1);
19215 add_unwind_opcode (0xb2, 1);
19217 else if (offset
> 0x100)
19219 /* Two short opcodes. */
19220 add_unwind_opcode (0x3f, 1);
19221 op
= (offset
- 0x104) >> 2;
19222 add_unwind_opcode (op
, 1);
19224 else if (offset
> 0)
19226 /* Short opcode. */
19227 op
= (offset
- 4) >> 2;
19228 add_unwind_opcode (op
, 1);
19230 else if (offset
< 0)
19233 while (offset
> 0x100)
19235 add_unwind_opcode (0x7f, 1);
19238 op
= ((offset
- 4) >> 2) | 0x40;
19239 add_unwind_opcode (op
, 1);
19243 /* Finish the list of unwind opcodes for this function. */
19245 finish_unwind_opcodes (void)
19249 if (unwind
.fp_used
)
19251 /* Adjust sp as necessary. */
19252 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
19253 flush_pending_unwind ();
19255 /* After restoring sp from the frame pointer. */
19256 op
= 0x90 | unwind
.fp_reg
;
19257 add_unwind_opcode (op
, 1);
19260 flush_pending_unwind ();
19264 /* Start an exception table entry. If idx is nonzero this is an index table
19268 start_unwind_section (const segT text_seg
, int idx
)
19270 const char * text_name
;
19271 const char * prefix
;
19272 const char * prefix_once
;
19273 const char * group_name
;
19277 size_t sec_name_len
;
19284 prefix
= ELF_STRING_ARM_unwind
;
19285 prefix_once
= ELF_STRING_ARM_unwind_once
;
19286 type
= SHT_ARM_EXIDX
;
19290 prefix
= ELF_STRING_ARM_unwind_info
;
19291 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
19292 type
= SHT_PROGBITS
;
19295 text_name
= segment_name (text_seg
);
19296 if (streq (text_name
, ".text"))
19299 if (strncmp (text_name
, ".gnu.linkonce.t.",
19300 strlen (".gnu.linkonce.t.")) == 0)
19302 prefix
= prefix_once
;
19303 text_name
+= strlen (".gnu.linkonce.t.");
19306 prefix_len
= strlen (prefix
);
19307 text_len
= strlen (text_name
);
19308 sec_name_len
= prefix_len
+ text_len
;
19309 sec_name
= (char *) xmalloc (sec_name_len
+ 1);
19310 memcpy (sec_name
, prefix
, prefix_len
);
19311 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
19312 sec_name
[prefix_len
+ text_len
] = '\0';
19318 /* Handle COMDAT group. */
19319 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
19321 group_name
= elf_group_name (text_seg
);
19322 if (group_name
== NULL
)
19324 as_bad (_("Group section `%s' has no group signature"),
19325 segment_name (text_seg
));
19326 ignore_rest_of_line ();
19329 flags
|= SHF_GROUP
;
19333 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
19335 /* Set the section link for index tables. */
19337 elf_linked_to_section (now_seg
) = text_seg
;
19341 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
19342 personality routine data. Returns zero, or the index table value for
19343 and inline entry. */
19346 create_unwind_entry (int have_data
)
19351 /* The current word of data. */
19353 /* The number of bytes left in this word. */
19356 finish_unwind_opcodes ();
19358 /* Remember the current text section. */
19359 unwind
.saved_seg
= now_seg
;
19360 unwind
.saved_subseg
= now_subseg
;
19362 start_unwind_section (now_seg
, 0);
19364 if (unwind
.personality_routine
== NULL
)
19366 if (unwind
.personality_index
== -2)
19369 as_bad (_("handlerdata in cantunwind frame"));
19370 return 1; /* EXIDX_CANTUNWIND. */
19373 /* Use a default personality routine if none is specified. */
19374 if (unwind
.personality_index
== -1)
19376 if (unwind
.opcode_count
> 3)
19377 unwind
.personality_index
= 1;
19379 unwind
.personality_index
= 0;
19382 /* Space for the personality routine entry. */
19383 if (unwind
.personality_index
== 0)
19385 if (unwind
.opcode_count
> 3)
19386 as_bad (_("too many unwind opcodes for personality routine 0"));
19390 /* All the data is inline in the index table. */
19393 while (unwind
.opcode_count
> 0)
19395 unwind
.opcode_count
--;
19396 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
19400 /* Pad with "finish" opcodes. */
19402 data
= (data
<< 8) | 0xb0;
19409 /* We get two opcodes "free" in the first word. */
19410 size
= unwind
.opcode_count
- 2;
19413 /* An extra byte is required for the opcode count. */
19414 size
= unwind
.opcode_count
+ 1;
19416 size
= (size
+ 3) >> 2;
19418 as_bad (_("too many unwind opcodes"));
19420 frag_align (2, 0, 0);
19421 record_alignment (now_seg
, 2);
19422 unwind
.table_entry
= expr_build_dot ();
19424 /* Allocate the table entry. */
19425 ptr
= frag_more ((size
<< 2) + 4);
19426 where
= frag_now_fix () - ((size
<< 2) + 4);
19428 switch (unwind
.personality_index
)
19431 /* ??? Should this be a PLT generating relocation? */
19432 /* Custom personality routine. */
19433 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
19434 BFD_RELOC_ARM_PREL31
);
19439 /* Set the first byte to the number of additional words. */
19444 /* ABI defined personality routines. */
19446 /* Three opcodes bytes are packed into the first word. */
19453 /* The size and first two opcode bytes go in the first word. */
19454 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
19459 /* Should never happen. */
19463 /* Pack the opcodes into words (MSB first), reversing the list at the same
19465 while (unwind
.opcode_count
> 0)
19469 md_number_to_chars (ptr
, data
, 4);
19474 unwind
.opcode_count
--;
19476 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
19479 /* Finish off the last word. */
19482 /* Pad with "finish" opcodes. */
19484 data
= (data
<< 8) | 0xb0;
19486 md_number_to_chars (ptr
, data
, 4);
19491 /* Add an empty descriptor if there is no user-specified data. */
19492 ptr
= frag_more (4);
19493 md_number_to_chars (ptr
, 0, 4);
19500 /* Initialize the DWARF-2 unwind information for this procedure. */
19503 tc_arm_frame_initial_instructions (void)
19505 cfi_add_CFA_def_cfa (REG_SP
, 0);
19507 #endif /* OBJ_ELF */
19509 /* Convert REGNAME to a DWARF-2 register number. */
19512 tc_arm_regname_to_dw2regnum (char *regname
)
19514 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
19524 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
19528 exp
.X_op
= O_secrel
;
19529 exp
.X_add_symbol
= symbol
;
19530 exp
.X_add_number
= 0;
19531 emit_expr (&exp
, size
);
19535 /* MD interface: Symbol and relocation handling. */
19537 /* Return the address within the segment that a PC-relative fixup is
19538 relative to. For ARM, PC-relative fixups applied to instructions
19539 are generally relative to the location of the fixup plus 8 bytes.
19540 Thumb branches are offset by 4, and Thumb loads relative to PC
19541 require special handling. */
19544 md_pcrel_from_section (fixS
* fixP
, segT seg
)
19546 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
19548 /* If this is pc-relative and we are going to emit a relocation
19549 then we just want to put out any pipeline compensation that the linker
19550 will need. Otherwise we want to use the calculated base.
19551 For WinCE we skip the bias for externals as well, since this
19552 is how the MS ARM-CE assembler behaves and we want to be compatible. */
19554 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
19555 || (arm_force_relocation (fixP
)
19557 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
19563 switch (fixP
->fx_r_type
)
19565 /* PC relative addressing on the Thumb is slightly odd as the
19566 bottom two bits of the PC are forced to zero for the
19567 calculation. This happens *after* application of the
19568 pipeline offset. However, Thumb adrl already adjusts for
19569 this, so we need not do it again. */
19570 case BFD_RELOC_ARM_THUMB_ADD
:
19573 case BFD_RELOC_ARM_THUMB_OFFSET
:
19574 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
19575 case BFD_RELOC_ARM_T32_ADD_PC12
:
19576 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
19577 return (base
+ 4) & ~3;
19579 /* Thumb branches are simply offset by +4. */
19580 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
19581 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
19582 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
19583 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
19584 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
19587 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
19589 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19590 && (!S_IS_EXTERNAL (fixP
->fx_addsy
))
19591 && ARM_IS_FUNC (fixP
->fx_addsy
)
19592 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
19593 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
19596 /* BLX is like branches above, but forces the low two bits of PC to
19598 case BFD_RELOC_THUMB_PCREL_BLX
:
19600 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19601 && (!S_IS_EXTERNAL (fixP
->fx_addsy
))
19602 && THUMB_IS_FUNC (fixP
->fx_addsy
)
19603 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
19604 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
19605 return (base
+ 4) & ~3;
19607 /* ARM mode branches are offset by +8. However, the Windows CE
19608 loader expects the relocation not to take this into account. */
19609 case BFD_RELOC_ARM_PCREL_BLX
:
19611 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19612 && (!S_IS_EXTERNAL (fixP
->fx_addsy
))
19613 && ARM_IS_FUNC (fixP
->fx_addsy
)
19614 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
19615 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
19618 case BFD_RELOC_ARM_PCREL_CALL
:
19620 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19621 && (!S_IS_EXTERNAL (fixP
->fx_addsy
))
19622 && THUMB_IS_FUNC (fixP
->fx_addsy
)
19623 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
19624 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
19627 case BFD_RELOC_ARM_PCREL_BRANCH
:
19628 case BFD_RELOC_ARM_PCREL_JUMP
:
19629 case BFD_RELOC_ARM_PLT32
:
19631 /* When handling fixups immediately, because we have already
19632 discovered the value of a symbol, or the address of the frag involved
19633 we must account for the offset by +8, as the OS loader will never see the reloc.
19634 see fixup_segment() in write.c
19635 The S_IS_EXTERNAL test handles the case of global symbols.
19636 Those need the calculated base, not just the pipe compensation the linker will need. */
19638 && fixP
->fx_addsy
!= NULL
19639 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19640 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
19648 /* ARM mode loads relative to PC are also offset by +8. Unlike
19649 branches, the Windows CE loader *does* expect the relocation
19650 to take this into account. */
19651 case BFD_RELOC_ARM_OFFSET_IMM
:
19652 case BFD_RELOC_ARM_OFFSET_IMM8
:
19653 case BFD_RELOC_ARM_HWLITERAL
:
19654 case BFD_RELOC_ARM_LITERAL
:
19655 case BFD_RELOC_ARM_CP_OFF_IMM
:
19659 /* Other PC-relative relocations are un-offset. */
19665 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
19666 Otherwise we have no need to default values of symbols. */
19669 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
19672 if (name
[0] == '_' && name
[1] == 'G'
19673 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
19677 if (symbol_find (name
))
19678 as_bad (_("GOT already in the symbol table"));
19680 GOT_symbol
= symbol_new (name
, undefined_section
,
19681 (valueT
) 0, & zero_address_frag
);
19691 /* Subroutine of md_apply_fix. Check to see if an immediate can be
19692 computed as two separate immediate values, added together. We
19693 already know that this value cannot be computed by just one ARM
19696 static unsigned int
19697 validate_immediate_twopart (unsigned int val
,
19698 unsigned int * highpart
)
19703 for (i
= 0; i
< 32; i
+= 2)
19704 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
19710 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
19712 else if (a
& 0xff0000)
19714 if (a
& 0xff000000)
19716 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
19720 gas_assert (a
& 0xff000000);
19721 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
19724 return (a
& 0xff) | (i
<< 7);
19731 validate_offset_imm (unsigned int val
, int hwse
)
19733 if ((hwse
&& val
> 255) || val
> 4095)
19738 /* Subroutine of md_apply_fix. Do those data_ops which can take a
19739 negative immediate constant by altering the instruction. A bit of
19744 by inverting the second operand, and
19747 by negating the second operand. */
19750 negate_data_op (unsigned long * instruction
,
19751 unsigned long value
)
19754 unsigned long negated
, inverted
;
19756 negated
= encode_arm_immediate (-value
);
19757 inverted
= encode_arm_immediate (~value
);
19759 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
19762 /* First negates. */
19763 case OPCODE_SUB
: /* ADD <-> SUB */
19764 new_inst
= OPCODE_ADD
;
19769 new_inst
= OPCODE_SUB
;
19773 case OPCODE_CMP
: /* CMP <-> CMN */
19774 new_inst
= OPCODE_CMN
;
19779 new_inst
= OPCODE_CMP
;
19783 /* Now Inverted ops. */
19784 case OPCODE_MOV
: /* MOV <-> MVN */
19785 new_inst
= OPCODE_MVN
;
19790 new_inst
= OPCODE_MOV
;
19794 case OPCODE_AND
: /* AND <-> BIC */
19795 new_inst
= OPCODE_BIC
;
19800 new_inst
= OPCODE_AND
;
19804 case OPCODE_ADC
: /* ADC <-> SBC */
19805 new_inst
= OPCODE_SBC
;
19810 new_inst
= OPCODE_ADC
;
19814 /* We cannot do anything. */
19819 if (value
== (unsigned) FAIL
)
19822 *instruction
&= OPCODE_MASK
;
19823 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
19827 /* Like negate_data_op, but for Thumb-2. */
19829 static unsigned int
19830 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
19834 unsigned int negated
, inverted
;
19836 negated
= encode_thumb32_immediate (-value
);
19837 inverted
= encode_thumb32_immediate (~value
);
19839 rd
= (*instruction
>> 8) & 0xf;
19840 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
19843 /* ADD <-> SUB. Includes CMP <-> CMN. */
19844 case T2_OPCODE_SUB
:
19845 new_inst
= T2_OPCODE_ADD
;
19849 case T2_OPCODE_ADD
:
19850 new_inst
= T2_OPCODE_SUB
;
19854 /* ORR <-> ORN. Includes MOV <-> MVN. */
19855 case T2_OPCODE_ORR
:
19856 new_inst
= T2_OPCODE_ORN
;
19860 case T2_OPCODE_ORN
:
19861 new_inst
= T2_OPCODE_ORR
;
19865 /* AND <-> BIC. TST has no inverted equivalent. */
19866 case T2_OPCODE_AND
:
19867 new_inst
= T2_OPCODE_BIC
;
19874 case T2_OPCODE_BIC
:
19875 new_inst
= T2_OPCODE_AND
;
19880 case T2_OPCODE_ADC
:
19881 new_inst
= T2_OPCODE_SBC
;
19885 case T2_OPCODE_SBC
:
19886 new_inst
= T2_OPCODE_ADC
;
19890 /* We cannot do anything. */
19895 if (value
== (unsigned int)FAIL
)
19898 *instruction
&= T2_OPCODE_MASK
;
19899 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
19903 /* Read a 32-bit thumb instruction from buf. */
19904 static unsigned long
19905 get_thumb32_insn (char * buf
)
19907 unsigned long insn
;
19908 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
19909 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
19915 /* We usually want to set the low bit on the address of thumb function
19916 symbols. In particular .word foo - . should have the low bit set.
19917 Generic code tries to fold the difference of two symbols to
19918 a constant. Prevent this and force a relocation when the first symbols
19919 is a thumb function. */
19922 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
19924 if (op
== O_subtract
19925 && l
->X_op
== O_symbol
19926 && r
->X_op
== O_symbol
19927 && THUMB_IS_FUNC (l
->X_add_symbol
))
19929 l
->X_op
= O_subtract
;
19930 l
->X_op_symbol
= r
->X_add_symbol
;
19931 l
->X_add_number
-= r
->X_add_number
;
19935 /* Process as normal. */
19939 /* Encode Thumb2 unconditional branches and calls. The encoding
19940 for the 2 are identical for the immediate values. */
19943 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
19945 #define T2I1I2MASK ((1 << 13) | (1 << 11))
19948 addressT S
, I1
, I2
, lo
, hi
;
19950 S
= (value
>> 24) & 0x01;
19951 I1
= (value
>> 23) & 0x01;
19952 I2
= (value
>> 22) & 0x01;
19953 hi
= (value
>> 12) & 0x3ff;
19954 lo
= (value
>> 1) & 0x7ff;
19955 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19956 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
19957 newval
|= (S
<< 10) | hi
;
19958 newval2
&= ~T2I1I2MASK
;
19959 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
19960 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19961 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
19965 md_apply_fix (fixS
* fixP
,
19969 offsetT value
= * valP
;
19971 unsigned int newimm
;
19972 unsigned long temp
;
19974 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
19976 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
19978 /* Note whether this will delete the relocation. */
19980 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
19983 /* On a 64-bit host, silently truncate 'value' to 32 bits for
19984 consistency with the behaviour on 32-bit hosts. Remember value
19986 value
&= 0xffffffff;
19987 value
^= 0x80000000;
19988 value
-= 0x80000000;
19991 fixP
->fx_addnumber
= value
;
19993 /* Same treatment for fixP->fx_offset. */
19994 fixP
->fx_offset
&= 0xffffffff;
19995 fixP
->fx_offset
^= 0x80000000;
19996 fixP
->fx_offset
-= 0x80000000;
19998 switch (fixP
->fx_r_type
)
20000 case BFD_RELOC_NONE
:
20001 /* This will need to go in the object file. */
20005 case BFD_RELOC_ARM_IMMEDIATE
:
20006 /* We claim that this fixup has been processed here,
20007 even if in fact we generate an error because we do
20008 not have a reloc for it, so tc_gen_reloc will reject it. */
20011 if (fixP
->fx_addsy
)
20013 const char *msg
= 0;
20015 if (! S_IS_DEFINED (fixP
->fx_addsy
))
20016 msg
= _("undefined symbol %s used as an immediate value");
20017 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
20018 msg
= _("symbol %s is in a different section");
20019 else if (S_IS_WEAK (fixP
->fx_addsy
))
20020 msg
= _("symbol %s is weak and may be overridden later");
20024 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20025 msg
, S_GET_NAME (fixP
->fx_addsy
));
20030 newimm
= encode_arm_immediate (value
);
20031 temp
= md_chars_to_number (buf
, INSN_SIZE
);
20033 /* If the instruction will fail, see if we can fix things up by
20034 changing the opcode. */
20035 if (newimm
== (unsigned int) FAIL
20036 && (newimm
= negate_data_op (&temp
, value
)) == (unsigned int) FAIL
)
20038 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20039 _("invalid constant (%lx) after fixup"),
20040 (unsigned long) value
);
20044 newimm
|= (temp
& 0xfffff000);
20045 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
20048 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
20050 unsigned int highpart
= 0;
20051 unsigned int newinsn
= 0xe1a00000; /* nop. */
20053 if (fixP
->fx_addsy
)
20055 const char *msg
= 0;
20057 if (! S_IS_DEFINED (fixP
->fx_addsy
))
20058 msg
= _("undefined symbol %s used as an immediate value");
20059 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
20060 msg
= _("symbol %s is in a different section");
20061 else if (S_IS_WEAK (fixP
->fx_addsy
))
20062 msg
= _("symbol %s is weak and may be overridden later");
20066 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20067 msg
, S_GET_NAME (fixP
->fx_addsy
));
20072 newimm
= encode_arm_immediate (value
);
20073 temp
= md_chars_to_number (buf
, INSN_SIZE
);
20075 /* If the instruction will fail, see if we can fix things up by
20076 changing the opcode. */
20077 if (newimm
== (unsigned int) FAIL
20078 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
20080 /* No ? OK - try using two ADD instructions to generate
20082 newimm
= validate_immediate_twopart (value
, & highpart
);
20084 /* Yes - then make sure that the second instruction is
20086 if (newimm
!= (unsigned int) FAIL
)
20088 /* Still No ? Try using a negated value. */
20089 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
20090 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
20091 /* Otherwise - give up. */
20094 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20095 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
20100 /* Replace the first operand in the 2nd instruction (which
20101 is the PC) with the destination register. We have
20102 already added in the PC in the first instruction and we
20103 do not want to do it again. */
20104 newinsn
&= ~ 0xf0000;
20105 newinsn
|= ((newinsn
& 0x0f000) << 4);
20108 newimm
|= (temp
& 0xfffff000);
20109 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
20111 highpart
|= (newinsn
& 0xfffff000);
20112 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
20116 case BFD_RELOC_ARM_OFFSET_IMM
:
20117 if (!fixP
->fx_done
&& seg
->use_rela_p
)
20120 case BFD_RELOC_ARM_LITERAL
:
20126 if (validate_offset_imm (value
, 0) == FAIL
)
20128 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
20129 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20130 _("invalid literal constant: pool needs to be closer"));
20132 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20133 _("bad immediate value for offset (%ld)"),
20138 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20139 newval
&= 0xff7ff000;
20140 newval
|= value
| (sign
? INDEX_UP
: 0);
20141 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20144 case BFD_RELOC_ARM_OFFSET_IMM8
:
20145 case BFD_RELOC_ARM_HWLITERAL
:
20151 if (validate_offset_imm (value
, 1) == FAIL
)
20153 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
20154 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20155 _("invalid literal constant: pool needs to be closer"));
20157 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
20162 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20163 newval
&= 0xff7ff0f0;
20164 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
20165 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20168 case BFD_RELOC_ARM_T32_OFFSET_U8
:
20169 if (value
< 0 || value
> 1020 || value
% 4 != 0)
20170 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20171 _("bad immediate value for offset (%ld)"), (long) value
);
20174 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
20176 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
20179 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
20180 /* This is a complicated relocation used for all varieties of Thumb32
20181 load/store instruction with immediate offset:
20183 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
20184 *4, optional writeback(W)
20185 (doubleword load/store)
20187 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
20188 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
20189 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
20190 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
20191 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
20193 Uppercase letters indicate bits that are already encoded at
20194 this point. Lowercase letters are our problem. For the
20195 second block of instructions, the secondary opcode nybble
20196 (bits 8..11) is present, and bit 23 is zero, even if this is
20197 a PC-relative operation. */
20198 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20200 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
20202 if ((newval
& 0xf0000000) == 0xe0000000)
20204 /* Doubleword load/store: 8-bit offset, scaled by 4. */
20206 newval
|= (1 << 23);
20209 if (value
% 4 != 0)
20211 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20212 _("offset not a multiple of 4"));
20218 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20219 _("offset out of range"));
20224 else if ((newval
& 0x000f0000) == 0x000f0000)
20226 /* PC-relative, 12-bit offset. */
20228 newval
|= (1 << 23);
20233 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20234 _("offset out of range"));
20239 else if ((newval
& 0x00000100) == 0x00000100)
20241 /* Writeback: 8-bit, +/- offset. */
20243 newval
|= (1 << 9);
20248 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20249 _("offset out of range"));
20254 else if ((newval
& 0x00000f00) == 0x00000e00)
20256 /* T-instruction: positive 8-bit offset. */
20257 if (value
< 0 || value
> 0xff)
20259 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20260 _("offset out of range"));
20268 /* Positive 12-bit or negative 8-bit offset. */
20272 newval
|= (1 << 23);
20282 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20283 _("offset out of range"));
20290 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
20291 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
20294 case BFD_RELOC_ARM_SHIFT_IMM
:
20295 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20296 if (((unsigned long) value
) > 32
20298 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
20300 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20301 _("shift expression is too large"));
20306 /* Shifts of zero must be done as lsl. */
20308 else if (value
== 32)
20310 newval
&= 0xfffff07f;
20311 newval
|= (value
& 0x1f) << 7;
20312 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20315 case BFD_RELOC_ARM_T32_IMMEDIATE
:
20316 case BFD_RELOC_ARM_T32_ADD_IMM
:
20317 case BFD_RELOC_ARM_T32_IMM12
:
20318 case BFD_RELOC_ARM_T32_ADD_PC12
:
20319 /* We claim that this fixup has been processed here,
20320 even if in fact we generate an error because we do
20321 not have a reloc for it, so tc_gen_reloc will reject it. */
20325 && ! S_IS_DEFINED (fixP
->fx_addsy
))
20327 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20328 _("undefined symbol %s used as an immediate value"),
20329 S_GET_NAME (fixP
->fx_addsy
));
20333 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20335 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
20338 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
20339 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
20341 newimm
= encode_thumb32_immediate (value
);
20342 if (newimm
== (unsigned int) FAIL
)
20343 newimm
= thumb32_negate_data_op (&newval
, value
);
20345 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
20346 && newimm
== (unsigned int) FAIL
)
20348 /* Turn add/sub into addw/subw. */
20349 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
20350 newval
= (newval
& 0xfeffffff) | 0x02000000;
20351 /* No flat 12-bit imm encoding for addsw/subsw. */
20352 if ((newval
& 0x00100000) == 0)
20354 /* 12 bit immediate for addw/subw. */
20358 newval
^= 0x00a00000;
20361 newimm
= (unsigned int) FAIL
;
20367 if (newimm
== (unsigned int)FAIL
)
20369 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20370 _("invalid constant (%lx) after fixup"),
20371 (unsigned long) value
);
20375 newval
|= (newimm
& 0x800) << 15;
20376 newval
|= (newimm
& 0x700) << 4;
20377 newval
|= (newimm
& 0x0ff);
20379 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
20380 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
20383 case BFD_RELOC_ARM_SMC
:
20384 if (((unsigned long) value
) > 0xffff)
20385 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20386 _("invalid smc expression"));
20387 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20388 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
20389 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20392 case BFD_RELOC_ARM_SWI
:
20393 if (fixP
->tc_fix_data
!= 0)
20395 if (((unsigned long) value
) > 0xff)
20396 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20397 _("invalid swi expression"));
20398 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20400 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20404 if (((unsigned long) value
) > 0x00ffffff)
20405 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20406 _("invalid swi expression"));
20407 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20409 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20413 case BFD_RELOC_ARM_MULTI
:
20414 if (((unsigned long) value
) > 0xffff)
20415 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20416 _("invalid expression in load/store multiple"));
20417 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
20418 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20422 case BFD_RELOC_ARM_PCREL_CALL
:
20424 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
20426 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20427 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20428 && THUMB_IS_FUNC (fixP
->fx_addsy
))
20429 /* Flip the bl to blx. This is a simple flip
20430 bit here because we generate PCREL_CALL for
20431 unconditional bls. */
20433 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20434 newval
= newval
| 0x10000000;
20435 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20441 goto arm_branch_common
;
20443 case BFD_RELOC_ARM_PCREL_JUMP
:
20444 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
20446 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20447 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20448 && THUMB_IS_FUNC (fixP
->fx_addsy
))
20450 /* This would map to a bl<cond>, b<cond>,
20451 b<always> to a Thumb function. We
20452 need to force a relocation for this particular
20454 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20458 case BFD_RELOC_ARM_PLT32
:
20460 case BFD_RELOC_ARM_PCREL_BRANCH
:
20462 goto arm_branch_common
;
20464 case BFD_RELOC_ARM_PCREL_BLX
:
20467 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
20469 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20470 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20471 && ARM_IS_FUNC (fixP
->fx_addsy
))
20473 /* Flip the blx to a bl and warn. */
20474 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
20475 newval
= 0xeb000000;
20476 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
20477 _("blx to '%s' an ARM ISA state function changed to bl"),
20479 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20485 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
20486 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
20490 /* We are going to store value (shifted right by two) in the
20491 instruction, in a 24 bit, signed field. Bits 26 through 32 either
20492 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
20493 also be clear. */
20495 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20496 _("misaligned branch destination"));
20497 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
20498 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
20499 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20500 _("branch out of range"));
20502 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20504 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20505 newval
|= (value
>> 2) & 0x00ffffff;
20506 /* Set the H bit on BLX instructions. */
20510 newval
|= 0x01000000;
20512 newval
&= ~0x01000000;
20514 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20518 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
20519 /* CBZ can only branch forward. */
20521 /* Attempts to use CBZ to branch to the next instruction
20522 (which, strictly speaking, are prohibited) will be turned into
20525 FIXME: It may be better to remove the instruction completely and
20526 perform relaxation. */
20529 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20530 newval
= 0xbf00; /* NOP encoding T1 */
20531 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20536 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20537 _("branch out of range"));
20539 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20541 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20542 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
20543 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20548 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
20549 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
20550 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20551 _("branch out of range"));
20553 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20555 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20556 newval
|= (value
& 0x1ff) >> 1;
20557 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20561 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
20562 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
20563 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20564 _("branch out of range"));
20566 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20568 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20569 newval
|= (value
& 0xfff) >> 1;
20570 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20574 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
20576 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20577 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20578 && S_IS_DEFINED (fixP
->fx_addsy
)
20579 && ARM_IS_FUNC (fixP
->fx_addsy
)
20580 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
20582 /* Force a relocation for a branch 20 bits wide. */
20585 if ((value
& ~0x1fffff) && ((value
& ~0x1fffff) != ~0x1fffff))
20586 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20587 _("conditional branch out of range"));
20589 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20592 addressT S
, J1
, J2
, lo
, hi
;
20594 S
= (value
& 0x00100000) >> 20;
20595 J2
= (value
& 0x00080000) >> 19;
20596 J1
= (value
& 0x00040000) >> 18;
20597 hi
= (value
& 0x0003f000) >> 12;
20598 lo
= (value
& 0x00000ffe) >> 1;
20600 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20601 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20602 newval
|= (S
<< 10) | hi
;
20603 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
20604 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20605 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
20609 case BFD_RELOC_THUMB_PCREL_BLX
:
20611 /* If there is a blx from a thumb state function to
20612 another thumb function flip this to a bl and warn
20616 && S_IS_DEFINED (fixP
->fx_addsy
)
20617 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20618 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20619 && THUMB_IS_FUNC (fixP
->fx_addsy
))
20621 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
20622 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
20623 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
20625 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20626 newval
= newval
| 0x1000;
20627 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
20628 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
20633 goto thumb_bl_common
;
20635 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
20637 /* A bl from Thumb state ISA to an internal ARM state function
20638 is converted to a blx. */
20640 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20641 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20642 && S_IS_DEFINED (fixP
->fx_addsy
)
20643 && ARM_IS_FUNC (fixP
->fx_addsy
)
20644 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
20646 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20647 newval
= newval
& ~0x1000;
20648 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
20649 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
20656 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
&&
20657 fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
20658 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
20661 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
20662 /* For a BLX instruction, make sure that the relocation is rounded up
20663 to a word boundary. This follows the semantics of the instruction
20664 which specifies that bit 1 of the target address will come from bit
20665 1 of the base address. */
20666 value
= (value
+ 1) & ~ 1;
20669 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
20671 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_arch_t2
)))
20673 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20674 _("branch out of range"));
20676 else if ((value
& ~0x1ffffff)
20677 && ((value
& ~0x1ffffff) != ~0x1ffffff))
20679 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20680 _("Thumb2 branch out of range"));
20684 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20685 encode_thumb2_b_bl_offset (buf
, value
);
20689 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
20690 if ((value
& ~0x1ffffff) && ((value
& ~0x1ffffff) != ~0x1ffffff))
20691 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20692 _("branch out of range"));
20694 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20695 encode_thumb2_b_bl_offset (buf
, value
);
20700 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20701 md_number_to_chars (buf
, value
, 1);
20705 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20706 md_number_to_chars (buf
, value
, 2);
20710 case BFD_RELOC_ARM_TLS_GD32
:
20711 case BFD_RELOC_ARM_TLS_LE32
:
20712 case BFD_RELOC_ARM_TLS_IE32
:
20713 case BFD_RELOC_ARM_TLS_LDM32
:
20714 case BFD_RELOC_ARM_TLS_LDO32
:
20715 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
20718 case BFD_RELOC_ARM_GOT32
:
20719 case BFD_RELOC_ARM_GOTOFF
:
20720 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20721 md_number_to_chars (buf
, 0, 4);
20724 case BFD_RELOC_ARM_GOT_PREL
:
20725 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20726 md_number_to_chars (buf
, value
, 4);
20729 case BFD_RELOC_ARM_TARGET2
:
20730 /* TARGET2 is not partial-inplace, so we need to write the
20731 addend here for REL targets, because it won't be written out
20732 during reloc processing later. */
20733 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20734 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
20738 case BFD_RELOC_RVA
:
20740 case BFD_RELOC_ARM_TARGET1
:
20741 case BFD_RELOC_ARM_ROSEGREL32
:
20742 case BFD_RELOC_ARM_SBREL32
:
20743 case BFD_RELOC_32_PCREL
:
20745 case BFD_RELOC_32_SECREL
:
20747 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20749 /* For WinCE we only do this for pcrel fixups. */
20750 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
20752 md_number_to_chars (buf
, value
, 4);
20756 case BFD_RELOC_ARM_PREL31
:
20757 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20759 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
20760 if ((value
^ (value
>> 1)) & 0x40000000)
20762 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20763 _("rel31 relocation overflow"));
20765 newval
|= value
& 0x7fffffff;
20766 md_number_to_chars (buf
, newval
, 4);
20771 case BFD_RELOC_ARM_CP_OFF_IMM
:
20772 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
20773 if (value
< -1023 || value
> 1023 || (value
& 3))
20774 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20775 _("co-processor offset out of range"));
20780 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
20781 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
20782 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20784 newval
= get_thumb32_insn (buf
);
20785 newval
&= 0xff7fff00;
20786 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
20787 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
20788 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
20789 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20791 put_thumb32_insn (buf
, newval
);
20794 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
20795 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
20796 if (value
< -255 || value
> 255)
20797 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20798 _("co-processor offset out of range"));
20800 goto cp_off_common
;
20802 case BFD_RELOC_ARM_THUMB_OFFSET
:
20803 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20804 /* Exactly what ranges, and where the offset is inserted depends
20805 on the type of instruction, we can establish this from the
20807 switch (newval
>> 12)
20809 case 4: /* PC load. */
20810 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
20811 forced to zero for these loads; md_pcrel_from has already
20812 compensated for this. */
20814 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20815 _("invalid offset, target not word aligned (0x%08lX)"),
20816 (((unsigned long) fixP
->fx_frag
->fr_address
20817 + (unsigned long) fixP
->fx_where
) & ~3)
20818 + (unsigned long) value
);
20820 if (value
& ~0x3fc)
20821 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20822 _("invalid offset, value too big (0x%08lX)"),
20825 newval
|= value
>> 2;
20828 case 9: /* SP load/store. */
20829 if (value
& ~0x3fc)
20830 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20831 _("invalid offset, value too big (0x%08lX)"),
20833 newval
|= value
>> 2;
20836 case 6: /* Word load/store. */
20838 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20839 _("invalid offset, value too big (0x%08lX)"),
20841 newval
|= value
<< 4; /* 6 - 2. */
20844 case 7: /* Byte load/store. */
20846 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20847 _("invalid offset, value too big (0x%08lX)"),
20849 newval
|= value
<< 6;
20852 case 8: /* Halfword load/store. */
20854 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20855 _("invalid offset, value too big (0x%08lX)"),
20857 newval
|= value
<< 5; /* 6 - 1. */
20861 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20862 "Unable to process relocation for thumb opcode: %lx",
20863 (unsigned long) newval
);
20866 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20869 case BFD_RELOC_ARM_THUMB_ADD
:
20870 /* This is a complicated relocation, since we use it for all of
20871 the following immediate relocations:
20875 9bit ADD/SUB SP word-aligned
20876 10bit ADD PC/SP word-aligned
20878 The type of instruction being processed is encoded in the
20885 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20887 int rd
= (newval
>> 4) & 0xf;
20888 int rs
= newval
& 0xf;
20889 int subtract
= !!(newval
& 0x8000);
20891 /* Check for HI regs, only very restricted cases allowed:
20892 Adjusting SP, and using PC or SP to get an address. */
20893 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
20894 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
20895 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20896 _("invalid Hi register with immediate"));
20898 /* If value is negative, choose the opposite instruction. */
20902 subtract
= !subtract
;
20904 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20905 _("immediate value out of range"));
20910 if (value
& ~0x1fc)
20911 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20912 _("invalid immediate for stack address calculation"));
20913 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
20914 newval
|= value
>> 2;
20916 else if (rs
== REG_PC
|| rs
== REG_SP
)
20918 if (subtract
|| value
& ~0x3fc)
20919 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20920 _("invalid immediate for address calculation (value = 0x%08lX)"),
20921 (unsigned long) value
);
20922 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
20924 newval
|= value
>> 2;
20929 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20930 _("immediate value out of range"));
20931 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
20932 newval
|= (rd
<< 8) | value
;
20937 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20938 _("immediate value out of range"));
20939 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
20940 newval
|= rd
| (rs
<< 3) | (value
<< 6);
20943 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20946 case BFD_RELOC_ARM_THUMB_IMM
:
20947 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20948 if (value
< 0 || value
> 255)
20949 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20950 _("invalid immediate: %ld is out of range"),
20953 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20956 case BFD_RELOC_ARM_THUMB_SHIFT
:
20957 /* 5bit shift value (0..32). LSL cannot take 32. */
20958 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
20959 temp
= newval
& 0xf800;
20960 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
20961 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20962 _("invalid shift value: %ld"), (long) value
);
20963 /* Shifts of zero must be encoded as LSL. */
20965 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
20966 /* Shifts of 32 are encoded as zero. */
20967 else if (value
== 32)
20969 newval
|= value
<< 6;
20970 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20973 case BFD_RELOC_VTABLE_INHERIT
:
20974 case BFD_RELOC_VTABLE_ENTRY
:
20978 case BFD_RELOC_ARM_MOVW
:
20979 case BFD_RELOC_ARM_MOVT
:
20980 case BFD_RELOC_ARM_THUMB_MOVW
:
20981 case BFD_RELOC_ARM_THUMB_MOVT
:
20982 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20984 /* REL format relocations are limited to a 16-bit addend. */
20985 if (!fixP
->fx_done
)
20987 if (value
< -0x8000 || value
> 0x7fff)
20988 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20989 _("offset out of range"));
20991 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
20992 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
20997 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
20998 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
21000 newval
= get_thumb32_insn (buf
);
21001 newval
&= 0xfbf08f00;
21002 newval
|= (value
& 0xf000) << 4;
21003 newval
|= (value
& 0x0800) << 15;
21004 newval
|= (value
& 0x0700) << 4;
21005 newval
|= (value
& 0x00ff);
21006 put_thumb32_insn (buf
, newval
);
21010 newval
= md_chars_to_number (buf
, 4);
21011 newval
&= 0xfff0f000;
21012 newval
|= value
& 0x0fff;
21013 newval
|= (value
& 0xf000) << 4;
21014 md_number_to_chars (buf
, newval
, 4);
21019 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
21020 case BFD_RELOC_ARM_ALU_PC_G0
:
21021 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
21022 case BFD_RELOC_ARM_ALU_PC_G1
:
21023 case BFD_RELOC_ARM_ALU_PC_G2
:
21024 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
21025 case BFD_RELOC_ARM_ALU_SB_G0
:
21026 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
21027 case BFD_RELOC_ARM_ALU_SB_G1
:
21028 case BFD_RELOC_ARM_ALU_SB_G2
:
21029 gas_assert (!fixP
->fx_done
);
21030 if (!seg
->use_rela_p
)
21033 bfd_vma encoded_addend
;
21034 bfd_vma addend_abs
= abs (value
);
21036 /* Check that the absolute value of the addend can be
21037 expressed as an 8-bit constant plus a rotation. */
21038 encoded_addend
= encode_arm_immediate (addend_abs
);
21039 if (encoded_addend
== (unsigned int) FAIL
)
21040 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
21041 _("the offset 0x%08lX is not representable"),
21042 (unsigned long) addend_abs
);
21044 /* Extract the instruction. */
21045 insn
= md_chars_to_number (buf
, INSN_SIZE
);
21047 /* If the addend is positive, use an ADD instruction.
21048 Otherwise use a SUB. Take care not to destroy the S bit. */
21049 insn
&= 0xff1fffff;
21055 /* Place the encoded addend into the first 12 bits of the
21057 insn
&= 0xfffff000;
21058 insn
|= encoded_addend
;
21060 /* Update the instruction. */
21061 md_number_to_chars (buf
, insn
, INSN_SIZE
);
21065 case BFD_RELOC_ARM_LDR_PC_G0
:
21066 case BFD_RELOC_ARM_LDR_PC_G1
:
21067 case BFD_RELOC_ARM_LDR_PC_G2
:
21068 case BFD_RELOC_ARM_LDR_SB_G0
:
21069 case BFD_RELOC_ARM_LDR_SB_G1
:
21070 case BFD_RELOC_ARM_LDR_SB_G2
:
21071 gas_assert (!fixP
->fx_done
);
21072 if (!seg
->use_rela_p
)
21075 bfd_vma addend_abs
= abs (value
);
21077 /* Check that the absolute value of the addend can be
21078 encoded in 12 bits. */
21079 if (addend_abs
>= 0x1000)
21080 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
21081 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
21082 (unsigned long) addend_abs
);
21084 /* Extract the instruction. */
21085 insn
= md_chars_to_number (buf
, INSN_SIZE
);
21087 /* If the addend is negative, clear bit 23 of the instruction.
21088 Otherwise set it. */
21090 insn
&= ~(1 << 23);
21094 /* Place the absolute value of the addend into the first 12 bits
21095 of the instruction. */
21096 insn
&= 0xfffff000;
21097 insn
|= addend_abs
;
21099 /* Update the instruction. */
21100 md_number_to_chars (buf
, insn
, INSN_SIZE
);
21104 case BFD_RELOC_ARM_LDRS_PC_G0
:
21105 case BFD_RELOC_ARM_LDRS_PC_G1
:
21106 case BFD_RELOC_ARM_LDRS_PC_G2
:
21107 case BFD_RELOC_ARM_LDRS_SB_G0
:
21108 case BFD_RELOC_ARM_LDRS_SB_G1
:
21109 case BFD_RELOC_ARM_LDRS_SB_G2
:
21110 gas_assert (!fixP
->fx_done
);
21111 if (!seg
->use_rela_p
)
21114 bfd_vma addend_abs
= abs (value
);
21116 /* Check that the absolute value of the addend can be
21117 encoded in 8 bits. */
21118 if (addend_abs
>= 0x100)
21119 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
21120 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
21121 (unsigned long) addend_abs
);
21123 /* Extract the instruction. */
21124 insn
= md_chars_to_number (buf
, INSN_SIZE
);
21126 /* If the addend is negative, clear bit 23 of the instruction.
21127 Otherwise set it. */
21129 insn
&= ~(1 << 23);
21133 /* Place the first four bits of the absolute value of the addend
21134 into the first 4 bits of the instruction, and the remaining
21135 four into bits 8 .. 11. */
21136 insn
&= 0xfffff0f0;
21137 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
21139 /* Update the instruction. */
21140 md_number_to_chars (buf
, insn
, INSN_SIZE
);
21144 case BFD_RELOC_ARM_LDC_PC_G0
:
21145 case BFD_RELOC_ARM_LDC_PC_G1
:
21146 case BFD_RELOC_ARM_LDC_PC_G2
:
21147 case BFD_RELOC_ARM_LDC_SB_G0
:
21148 case BFD_RELOC_ARM_LDC_SB_G1
:
21149 case BFD_RELOC_ARM_LDC_SB_G2
:
21150 gas_assert (!fixP
->fx_done
);
21151 if (!seg
->use_rela_p
)
21154 bfd_vma addend_abs
= abs (value
);
21156 /* Check that the absolute value of the addend is a multiple of
21157 four and, when divided by four, fits in 8 bits. */
21158 if (addend_abs
& 0x3)
21159 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
21160 _("bad offset 0x%08lX (must be word-aligned)"),
21161 (unsigned long) addend_abs
);
21163 if ((addend_abs
>> 2) > 0xff)
21164 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
21165 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
21166 (unsigned long) addend_abs
);
21168 /* Extract the instruction. */
21169 insn
= md_chars_to_number (buf
, INSN_SIZE
);
21171 /* If the addend is negative, clear bit 23 of the instruction.
21172 Otherwise set it. */
21174 insn
&= ~(1 << 23);
21178 /* Place the addend (divided by four) into the first eight
21179 bits of the instruction. */
21180 insn
&= 0xfffffff0;
21181 insn
|= addend_abs
>> 2;
21183 /* Update the instruction. */
21184 md_number_to_chars (buf
, insn
, INSN_SIZE
);
21188 case BFD_RELOC_ARM_V4BX
:
21189 /* This will need to go in the object file. */
21193 case BFD_RELOC_UNUSED
:
21195 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
21196 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
21200 /* Translate internal representation of relocation info to BFD target
21204 tc_gen_reloc (asection
*section
, fixS
*fixp
)
21207 bfd_reloc_code_real_type code
;
21209 reloc
= (arelent
*) xmalloc (sizeof (arelent
));
21211 reloc
->sym_ptr_ptr
= (asymbol
**) xmalloc (sizeof (asymbol
*));
21212 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
21213 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
21215 if (fixp
->fx_pcrel
)
21217 if (section
->use_rela_p
)
21218 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
21220 fixp
->fx_offset
= reloc
->address
;
21222 reloc
->addend
= fixp
->fx_offset
;
21224 switch (fixp
->fx_r_type
)
21227 if (fixp
->fx_pcrel
)
21229 code
= BFD_RELOC_8_PCREL
;
21234 if (fixp
->fx_pcrel
)
21236 code
= BFD_RELOC_16_PCREL
;
21241 if (fixp
->fx_pcrel
)
21243 code
= BFD_RELOC_32_PCREL
;
21247 case BFD_RELOC_ARM_MOVW
:
21248 if (fixp
->fx_pcrel
)
21250 code
= BFD_RELOC_ARM_MOVW_PCREL
;
21254 case BFD_RELOC_ARM_MOVT
:
21255 if (fixp
->fx_pcrel
)
21257 code
= BFD_RELOC_ARM_MOVT_PCREL
;
21261 case BFD_RELOC_ARM_THUMB_MOVW
:
21262 if (fixp
->fx_pcrel
)
21264 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
21268 case BFD_RELOC_ARM_THUMB_MOVT
:
21269 if (fixp
->fx_pcrel
)
21271 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
21275 case BFD_RELOC_NONE
:
21276 case BFD_RELOC_ARM_PCREL_BRANCH
:
21277 case BFD_RELOC_ARM_PCREL_BLX
:
21278 case BFD_RELOC_RVA
:
21279 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
21280 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
21281 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
21282 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
21283 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
21284 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
21285 case BFD_RELOC_VTABLE_ENTRY
:
21286 case BFD_RELOC_VTABLE_INHERIT
:
21288 case BFD_RELOC_32_SECREL
:
21290 code
= fixp
->fx_r_type
;
21293 case BFD_RELOC_THUMB_PCREL_BLX
:
21295 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
21296 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
21299 code
= BFD_RELOC_THUMB_PCREL_BLX
;
21302 case BFD_RELOC_ARM_LITERAL
:
21303 case BFD_RELOC_ARM_HWLITERAL
:
21304 /* If this is called then a literal has
21305 been referenced across a section boundary. */
21306 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21307 _("literal referenced across section boundary"));
21311 case BFD_RELOC_ARM_GOT32
:
21312 case BFD_RELOC_ARM_GOTOFF
:
21313 case BFD_RELOC_ARM_GOT_PREL
:
21314 case BFD_RELOC_ARM_PLT32
:
21315 case BFD_RELOC_ARM_TARGET1
:
21316 case BFD_RELOC_ARM_ROSEGREL32
:
21317 case BFD_RELOC_ARM_SBREL32
:
21318 case BFD_RELOC_ARM_PREL31
:
21319 case BFD_RELOC_ARM_TARGET2
:
21320 case BFD_RELOC_ARM_TLS_LE32
:
21321 case BFD_RELOC_ARM_TLS_LDO32
:
21322 case BFD_RELOC_ARM_PCREL_CALL
:
21323 case BFD_RELOC_ARM_PCREL_JUMP
:
21324 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
21325 case BFD_RELOC_ARM_ALU_PC_G0
:
21326 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
21327 case BFD_RELOC_ARM_ALU_PC_G1
:
21328 case BFD_RELOC_ARM_ALU_PC_G2
:
21329 case BFD_RELOC_ARM_LDR_PC_G0
:
21330 case BFD_RELOC_ARM_LDR_PC_G1
:
21331 case BFD_RELOC_ARM_LDR_PC_G2
:
21332 case BFD_RELOC_ARM_LDRS_PC_G0
:
21333 case BFD_RELOC_ARM_LDRS_PC_G1
:
21334 case BFD_RELOC_ARM_LDRS_PC_G2
:
21335 case BFD_RELOC_ARM_LDC_PC_G0
:
21336 case BFD_RELOC_ARM_LDC_PC_G1
:
21337 case BFD_RELOC_ARM_LDC_PC_G2
:
21338 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
21339 case BFD_RELOC_ARM_ALU_SB_G0
:
21340 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
21341 case BFD_RELOC_ARM_ALU_SB_G1
:
21342 case BFD_RELOC_ARM_ALU_SB_G2
:
21343 case BFD_RELOC_ARM_LDR_SB_G0
:
21344 case BFD_RELOC_ARM_LDR_SB_G1
:
21345 case BFD_RELOC_ARM_LDR_SB_G2
:
21346 case BFD_RELOC_ARM_LDRS_SB_G0
:
21347 case BFD_RELOC_ARM_LDRS_SB_G1
:
21348 case BFD_RELOC_ARM_LDRS_SB_G2
:
21349 case BFD_RELOC_ARM_LDC_SB_G0
:
21350 case BFD_RELOC_ARM_LDC_SB_G1
:
21351 case BFD_RELOC_ARM_LDC_SB_G2
:
21352 case BFD_RELOC_ARM_V4BX
:
21353 code
= fixp
->fx_r_type
;
21356 case BFD_RELOC_ARM_TLS_GD32
:
21357 case BFD_RELOC_ARM_TLS_IE32
:
21358 case BFD_RELOC_ARM_TLS_LDM32
:
21359 /* BFD will include the symbol's address in the addend.
21360 But we don't want that, so subtract it out again here. */
21361 if (!S_IS_COMMON (fixp
->fx_addsy
))
21362 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
21363 code
= fixp
->fx_r_type
;
21367 case BFD_RELOC_ARM_IMMEDIATE
:
21368 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21369 _("internal relocation (type: IMMEDIATE) not fixed up"));
21372 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
21373 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21374 _("ADRL used for a symbol not defined in the same file"));
21377 case BFD_RELOC_ARM_OFFSET_IMM
:
21378 if (section
->use_rela_p
)
21380 code
= fixp
->fx_r_type
;
21384 if (fixp
->fx_addsy
!= NULL
21385 && !S_IS_DEFINED (fixp
->fx_addsy
)
21386 && S_IS_LOCAL (fixp
->fx_addsy
))
21388 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21389 _("undefined local label `%s'"),
21390 S_GET_NAME (fixp
->fx_addsy
));
21394 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21395 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
21402 switch (fixp
->fx_r_type
)
21404 case BFD_RELOC_NONE
: type
= "NONE"; break;
21405 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
21406 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
21407 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
21408 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
21409 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
21410 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
21411 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
21412 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
21413 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
21414 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
21415 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
21416 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
21417 default: type
= _("<unknown>"); break;
21419 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21420 _("cannot represent %s relocation in this object file format"),
21427 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
21429 && fixp
->fx_addsy
== GOT_symbol
)
21431 code
= BFD_RELOC_ARM_GOTPC
;
21432 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
21436 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
21438 if (reloc
->howto
== NULL
)
21440 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21441 _("cannot represent %s relocation in this object file format"),
21442 bfd_get_reloc_code_name (code
));
21446 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
21447 vtable entry to be used in the relocation's section offset. */
21448 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
21449 reloc
->address
= fixp
->fx_offset
;
21454 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
21457 cons_fix_new_arm (fragS
* frag
,
21462 bfd_reloc_code_real_type type
;
21466 FIXME: @@ Should look at CPU word size. */
21470 type
= BFD_RELOC_8
;
21473 type
= BFD_RELOC_16
;
21477 type
= BFD_RELOC_32
;
21480 type
= BFD_RELOC_64
;
21485 if (exp
->X_op
== O_secrel
)
21487 exp
->X_op
= O_symbol
;
21488 type
= BFD_RELOC_32_SECREL
;
21492 fix_new_exp (frag
, where
, (int) size
, exp
, pcrel
, type
);
21495 #if defined (OBJ_COFF)
21497 arm_validate_fix (fixS
* fixP
)
21499 /* If the destination of the branch is a defined symbol which does not have
21500 the THUMB_FUNC attribute, then we must be calling a function which has
21501 the (interfacearm) attribute. We look for the Thumb entry point to that
21502 function and change the branch to refer to that function instead. */
21503 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
21504 && fixP
->fx_addsy
!= NULL
21505 && S_IS_DEFINED (fixP
->fx_addsy
)
21506 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
21508 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
21515 arm_force_relocation (struct fix
* fixp
)
21517 #if defined (OBJ_COFF) && defined (TE_PE)
21518 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
21522 /* In case we have a call or a branch to a function in ARM ISA mode from
21523 a thumb function or vice-versa force the relocation. These relocations
21524 are cleared off for some cores that might have blx and simple transformations
21528 switch (fixp
->fx_r_type
)
21530 case BFD_RELOC_ARM_PCREL_JUMP
:
21531 case BFD_RELOC_ARM_PCREL_CALL
:
21532 case BFD_RELOC_THUMB_PCREL_BLX
:
21533 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
21537 case BFD_RELOC_ARM_PCREL_BLX
:
21538 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
21539 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
21540 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
21541 if (ARM_IS_FUNC (fixp
->fx_addsy
))
21550 /* Resolve these relocations even if the symbol is extern or weak. */
21551 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
21552 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
21553 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
21554 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
21555 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
21556 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
21557 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
)
21560 /* Always leave these relocations for the linker. */
21561 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
21562 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
21563 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
21566 /* Always generate relocations against function symbols. */
21567 if (fixp
->fx_r_type
== BFD_RELOC_32
21569 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
21572 return generic_force_reloc (fixp
);
21575 #if defined (OBJ_ELF) || defined (OBJ_COFF)
21576 /* Relocations against function names must be left unadjusted,
21577 so that the linker can use this information to generate interworking
21578 stubs. The MIPS version of this function
21579 also prevents relocations that are mips-16 specific, but I do not
21580 know why it does this.
21583 There is one other problem that ought to be addressed here, but
21584 which currently is not: Taking the address of a label (rather
21585 than a function) and then later jumping to that address. Such
21586 addresses also ought to have their bottom bit set (assuming that
21587 they reside in Thumb code), but at the moment they will not. */
21590 arm_fix_adjustable (fixS
* fixP
)
21592 if (fixP
->fx_addsy
== NULL
)
21595 /* Preserve relocations against symbols with function type. */
21596 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
21599 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
21600 && fixP
->fx_subsy
== NULL
)
21603 /* We need the symbol name for the VTABLE entries. */
21604 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
21605 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
21608 /* Don't allow symbols to be discarded on GOT related relocs. */
21609 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
21610 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
21611 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
21612 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
21613 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
21614 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
21615 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
21616 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
21617 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
21620 /* Similarly for group relocations. */
21621 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
21622 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
21623 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
21626 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
21627 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
21628 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
21629 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
21630 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
21631 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
21632 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
21633 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
21634 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
21639 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
21644 elf32_arm_target_format (void)
21647 return (target_big_endian
21648 ? "elf32-bigarm-symbian"
21649 : "elf32-littlearm-symbian");
21650 #elif defined (TE_VXWORKS)
21651 return (target_big_endian
21652 ? "elf32-bigarm-vxworks"
21653 : "elf32-littlearm-vxworks");
21655 if (target_big_endian
)
21656 return "elf32-bigarm";
21658 return "elf32-littlearm";
21663 armelf_frob_symbol (symbolS
* symp
,
21666 elf_frob_symbol (symp
, puntp
);
21670 /* MD interface: Finalization. */
21675 literal_pool
* pool
;
21677 /* Ensure that all the IT blocks are properly closed. */
21678 check_it_blocks_finished ();
21680 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
21682 /* Put it at the end of the relevant section. */
21683 subseg_set (pool
->section
, pool
->sub_section
);
21685 arm_elf_change_section ();
21692 /* Remove any excess mapping symbols generated for alignment frags in
21693 SEC. We may have created a mapping symbol before a zero byte
21694 alignment; remove it if there's a mapping symbol after the
21697 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
21698 void *dummy ATTRIBUTE_UNUSED
)
21700 segment_info_type
*seginfo
= seg_info (sec
);
21703 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
21706 for (fragp
= seginfo
->frchainP
->frch_root
;
21708 fragp
= fragp
->fr_next
)
21710 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
21711 fragS
*next
= fragp
->fr_next
;
21713 /* Variable-sized frags have been converted to fixed size by
21714 this point. But if this was variable-sized to start with,
21715 there will be a fixed-size frag after it. So don't handle
21717 if (sym
== NULL
|| next
== NULL
)
21720 if (S_GET_VALUE (sym
) < next
->fr_address
)
21721 /* Not at the end of this frag. */
21723 know (S_GET_VALUE (sym
) == next
->fr_address
);
21727 if (next
->tc_frag_data
.first_map
!= NULL
)
21729 /* Next frag starts with a mapping symbol. Discard this
21731 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
21735 if (next
->fr_next
== NULL
)
21737 /* This mapping symbol is at the end of the section. Discard
21739 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
21740 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
21744 /* As long as we have empty frags without any mapping symbols,
21746 /* If the next frag is non-empty and does not start with a
21747 mapping symbol, then this mapping symbol is required. */
21748 if (next
->fr_address
!= next
->fr_next
->fr_address
)
21751 next
= next
->fr_next
;
21753 while (next
!= NULL
);
21758 /* Adjust the symbol table. This marks Thumb symbols as distinct from
21762 arm_adjust_symtab (void)
21767 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
21769 if (ARM_IS_THUMB (sym
))
21771 if (THUMB_IS_FUNC (sym
))
21773 /* Mark the symbol as a Thumb function. */
21774 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
21775 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
21776 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
21778 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
21779 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
21781 as_bad (_("%s: unexpected function type: %d"),
21782 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
21784 else switch (S_GET_STORAGE_CLASS (sym
))
21787 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
21790 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
21793 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
21801 if (ARM_IS_INTERWORK (sym
))
21802 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
21809 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
21811 if (ARM_IS_THUMB (sym
))
21813 elf_symbol_type
* elf_sym
;
21815 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
21816 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
21818 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
21819 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
21821 /* If it's a .thumb_func, declare it as so,
21822 otherwise tag label as .code 16. */
21823 if (THUMB_IS_FUNC (sym
))
21824 elf_sym
->internal_elf_sym
.st_info
=
21825 ELF_ST_INFO (bind
, STT_ARM_TFUNC
);
21826 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
21827 elf_sym
->internal_elf_sym
.st_info
=
21828 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
21833 /* Remove any overlapping mapping symbols generated by alignment frags. */
21834 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
21838 /* MD interface: Initialization. */
21841 set_constant_flonums (void)
21845 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
21846 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
21850 /* Auto-select Thumb mode if it's the only available instruction set for the
21851 given architecture. */
21854 autoselect_thumb_from_cpu_variant (void)
21856 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
21857 opcode_select (16);
21866 if ( (arm_ops_hsh
= hash_new ()) == NULL
21867 || (arm_cond_hsh
= hash_new ()) == NULL
21868 || (arm_shift_hsh
= hash_new ()) == NULL
21869 || (arm_psr_hsh
= hash_new ()) == NULL
21870 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
21871 || (arm_reg_hsh
= hash_new ()) == NULL
21872 || (arm_reloc_hsh
= hash_new ()) == NULL
21873 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
21874 as_fatal (_("virtual memory exhausted"));
21876 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
21877 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
21878 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
21879 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
21880 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
21881 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
21882 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
21883 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
21884 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
21885 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
21886 (void *) (v7m_psrs
+ i
));
21887 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
21888 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
21890 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
21892 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
21893 (void *) (barrier_opt_names
+ i
));
21895 for (i
= 0; i
< sizeof (reloc_names
) / sizeof (struct reloc_entry
); i
++)
21896 hash_insert (arm_reloc_hsh
, reloc_names
[i
].name
, (void *) (reloc_names
+ i
));
21899 set_constant_flonums ();
21901 /* Set the cpu variant based on the command-line options. We prefer
21902 -mcpu= over -march= if both are set (as for GCC); and we prefer
21903 -mfpu= over any other way of setting the floating point unit.
21904 Use of legacy options with new options are faulted. */
21907 if (mcpu_cpu_opt
|| march_cpu_opt
)
21908 as_bad (_("use of old and new-style options to set CPU type"));
21910 mcpu_cpu_opt
= legacy_cpu
;
21912 else if (!mcpu_cpu_opt
)
21913 mcpu_cpu_opt
= march_cpu_opt
;
21918 as_bad (_("use of old and new-style options to set FPU type"));
21920 mfpu_opt
= legacy_fpu
;
21922 else if (!mfpu_opt
)
21924 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
21925 || defined (TE_NetBSD) || defined (TE_VXWORKS))
21926 /* Some environments specify a default FPU. If they don't, infer it
21927 from the processor. */
21929 mfpu_opt
= mcpu_fpu_opt
;
21931 mfpu_opt
= march_fpu_opt
;
21933 mfpu_opt
= &fpu_default
;
21939 if (mcpu_cpu_opt
!= NULL
)
21940 mfpu_opt
= &fpu_default
;
21941 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
21942 mfpu_opt
= &fpu_arch_vfp_v2
;
21944 mfpu_opt
= &fpu_arch_fpa
;
21950 mcpu_cpu_opt
= &cpu_default
;
21951 selected_cpu
= cpu_default
;
21955 selected_cpu
= *mcpu_cpu_opt
;
21957 mcpu_cpu_opt
= &arm_arch_any
;
21960 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
21962 autoselect_thumb_from_cpu_variant ();
21964 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
21966 #if defined OBJ_COFF || defined OBJ_ELF
21968 unsigned int flags
= 0;
21970 #if defined OBJ_ELF
21971 flags
= meabi_flags
;
21973 switch (meabi_flags
)
21975 case EF_ARM_EABI_UNKNOWN
:
21977 /* Set the flags in the private structure. */
21978 if (uses_apcs_26
) flags
|= F_APCS26
;
21979 if (support_interwork
) flags
|= F_INTERWORK
;
21980 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
21981 if (pic_code
) flags
|= F_PIC
;
21982 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
21983 flags
|= F_SOFT_FLOAT
;
21985 switch (mfloat_abi_opt
)
21987 case ARM_FLOAT_ABI_SOFT
:
21988 case ARM_FLOAT_ABI_SOFTFP
:
21989 flags
|= F_SOFT_FLOAT
;
21992 case ARM_FLOAT_ABI_HARD
:
21993 if (flags
& F_SOFT_FLOAT
)
21994 as_bad (_("hard-float conflicts with specified fpu"));
21998 /* Using pure-endian doubles (even if soft-float). */
21999 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
22000 flags
|= F_VFP_FLOAT
;
22002 #if defined OBJ_ELF
22003 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
22004 flags
|= EF_ARM_MAVERICK_FLOAT
;
22007 case EF_ARM_EABI_VER4
:
22008 case EF_ARM_EABI_VER5
:
22009 /* No additional flags to set. */
22016 bfd_set_private_flags (stdoutput
, flags
);
22018 /* We have run out flags in the COFF header to encode the
22019 status of ATPCS support, so instead we create a dummy,
22020 empty, debug section called .arm.atpcs. */
22025 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
22029 bfd_set_section_flags
22030 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
22031 bfd_set_section_size (stdoutput
, sec
, 0);
22032 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
22038 /* Record the CPU type as well. */
22039 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
22040 mach
= bfd_mach_arm_iWMMXt2
;
22041 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
22042 mach
= bfd_mach_arm_iWMMXt
;
22043 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
22044 mach
= bfd_mach_arm_XScale
;
22045 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
22046 mach
= bfd_mach_arm_ep9312
;
22047 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
22048 mach
= bfd_mach_arm_5TE
;
22049 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
22051 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
22052 mach
= bfd_mach_arm_5T
;
22054 mach
= bfd_mach_arm_5
;
22056 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
22058 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
22059 mach
= bfd_mach_arm_4T
;
22061 mach
= bfd_mach_arm_4
;
22063 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
22064 mach
= bfd_mach_arm_3M
;
22065 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
22066 mach
= bfd_mach_arm_3
;
22067 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
22068 mach
= bfd_mach_arm_2a
;
22069 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
22070 mach
= bfd_mach_arm_2
;
22072 mach
= bfd_mach_arm_unknown
;
22074 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
22077 /* Command line processing. */
22080 Invocation line includes a switch not recognized by the base assembler.
22081 See if it's a processor-specific option.
22083 This routine is somewhat complicated by the need for backwards
22084 compatibility (since older releases of gcc can't be changed).
22085 The new options try to make the interface as compatible as
22088 New options (supported) are:
22090 -mcpu=<cpu name> Assemble for selected processor
22091 -march=<architecture name> Assemble for selected architecture
22092 -mfpu=<fpu architecture> Assemble for selected FPU.
22093 -EB/-mbig-endian Big-endian
22094 -EL/-mlittle-endian Little-endian
22095 -k Generate PIC code
22096 -mthumb Start in Thumb mode
22097 -mthumb-interwork Code supports ARM/Thumb interworking
22099 -m[no-]warn-deprecated Warn about deprecated features
22101 For now we will also provide support for:
22103 -mapcs-32 32-bit Program counter
22104 -mapcs-26 26-bit Program counter
22105 -macps-float Floats passed in FP registers
22106 -mapcs-reentrant Reentrant code
22108 (sometime these will probably be replaced with -mapcs=<list of options>
22109 and -matpcs=<list of options>)
22111 The remaining options are only supported for back-wards compatibility.
22112 Cpu variants, the arm part is optional:
22113 -m[arm]1 Currently not supported.
22114 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
22115 -m[arm]3 Arm 3 processor
22116 -m[arm]6[xx], Arm 6 processors
22117 -m[arm]7[xx][t][[d]m] Arm 7 processors
22118 -m[arm]8[10] Arm 8 processors
22119 -m[arm]9[20][tdmi] Arm 9 processors
22120 -mstrongarm[110[0]] StrongARM processors
22121 -mxscale XScale processors
22122 -m[arm]v[2345[t[e]]] Arm architectures
22123 -mall All (except the ARM1)
22125 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
22126 -mfpe-old (No float load/store multiples)
22127 -mvfpxd VFP Single precision
22129 -mno-fpu Disable all floating point instructions
22131 The following CPU names are recognized:
22132 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
22133 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
22134 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
22135 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
22136 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
22137 arm10t arm10e, arm1020t, arm1020e, arm10200e,
22138 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
22142 const char * md_shortopts
= "m:k";
22144 #ifdef ARM_BI_ENDIAN
22145 #define OPTION_EB (OPTION_MD_BASE + 0)
22146 #define OPTION_EL (OPTION_MD_BASE + 1)
22148 #if TARGET_BYTES_BIG_ENDIAN
22149 #define OPTION_EB (OPTION_MD_BASE + 0)
22151 #define OPTION_EL (OPTION_MD_BASE + 1)
22154 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
22156 struct option md_longopts
[] =
22159 {"EB", no_argument
, NULL
, OPTION_EB
},
22162 {"EL", no_argument
, NULL
, OPTION_EL
},
22164 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
22165 {NULL
, no_argument
, NULL
, 0}
22168 size_t md_longopts_size
= sizeof (md_longopts
);
22170 struct arm_option_table
22172 char *option
; /* Option name to match. */
22173 char *help
; /* Help information. */
22174 int *var
; /* Variable to change. */
22175 int value
; /* What to change it to. */
22176 char *deprecated
; /* If non-null, print this message. */
22179 struct arm_option_table arm_opts
[] =
22181 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
22182 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
22183 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
22184 &support_interwork
, 1, NULL
},
22185 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
22186 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
22187 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
22189 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
22190 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
22191 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
22192 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
22195 /* These are recognized by the assembler, but have no affect on code. */
22196 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
22197 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
22199 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
22200 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
22201 &warn_on_deprecated
, 0, NULL
},
22202 {NULL
, NULL
, NULL
, 0, NULL
}
22205 struct arm_legacy_option_table
22207 char *option
; /* Option name to match. */
22208 const arm_feature_set
**var
; /* Variable to change. */
22209 const arm_feature_set value
; /* What to change it to. */
22210 char *deprecated
; /* If non-null, print this message. */
22213 const struct arm_legacy_option_table arm_legacy_opts
[] =
22215 /* DON'T add any new processors to this list -- we want the whole list
22216 to go away... Add them to the processors table instead. */
22217 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
22218 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
22219 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
22220 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
22221 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
22222 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
22223 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
22224 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
22225 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
22226 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
22227 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
22228 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
22229 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
22230 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
22231 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
22232 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
22233 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
22234 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
22235 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
22236 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
22237 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
22238 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
22239 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
22240 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
22241 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
22242 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
22243 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
22244 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
22245 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
22246 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
22247 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
22248 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
22249 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
22250 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
22251 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
22252 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
22253 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
22254 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
22255 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
22256 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
22257 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
22258 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
22259 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
22260 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
22261 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
22262 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
22263 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
22264 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
22265 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
22266 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
22267 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
22268 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
22269 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
22270 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
22271 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
22272 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
22273 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
22274 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
22275 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
22276 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
22277 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
22278 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
22279 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
22280 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
22281 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
22282 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
22283 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
22284 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
22285 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
22286 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
22287 N_("use -mcpu=strongarm110")},
22288 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
22289 N_("use -mcpu=strongarm1100")},
22290 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
22291 N_("use -mcpu=strongarm1110")},
22292 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
22293 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
22294 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
22296 /* Architecture variants -- don't add any more to this list either. */
22297 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
22298 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
22299 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
22300 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
22301 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
22302 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
22303 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
22304 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
22305 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
22306 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
22307 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
22308 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
22309 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
22310 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
22311 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
22312 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
22313 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
22314 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
22316 /* Floating point variants -- don't add any more to this list either. */
22317 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
22318 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
22319 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
22320 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
22321 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
22323 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
22326 struct arm_cpu_option_table
22329 const arm_feature_set value
;
22330 /* For some CPUs we assume an FPU unless the user explicitly sets
22332 const arm_feature_set default_fpu
;
22333 /* The canonical name of the CPU, or NULL to use NAME converted to upper
22335 const char *canonical_name
;
22338 /* This list should, at a minimum, contain all the cpu names
22339 recognized by GCC. */
22340 static const struct arm_cpu_option_table arm_cpus
[] =
22342 {"all", ARM_ANY
, FPU_ARCH_FPA
, NULL
},
22343 {"arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
},
22344 {"arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
},
22345 {"arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
22346 {"arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
22347 {"arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22348 {"arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22349 {"arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22350 {"arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22351 {"arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22352 {"arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22353 {"arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
22354 {"arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22355 {"arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
22356 {"arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22357 {"arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
22358 {"arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22359 {"arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22360 {"arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22361 {"arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22362 {"arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22363 {"arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22364 {"arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22365 {"arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22366 {"arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22367 {"arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22368 {"arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22369 {"arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22370 {"arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22371 {"arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22372 {"arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22373 {"arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22374 {"arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22375 {"strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22376 {"strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22377 {"strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22378 {"strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22379 {"strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22380 {"arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22381 {"arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"},
22382 {"arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22383 {"arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22384 {"arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22385 {"arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22386 {"fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22387 {"fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22388 /* For V5 or later processors we default to using VFP; but the user
22389 should really set the FPU type explicitly. */
22390 {"arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
22391 {"arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22392 {"arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
22393 {"arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
22394 {"arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
22395 {"arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
22396 {"arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"},
22397 {"arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22398 {"arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
22399 {"arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"},
22400 {"arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22401 {"arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22402 {"arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
22403 {"arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
22404 {"arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22405 {"arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"},
22406 {"arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
22407 {"arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22408 {"arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22409 {"arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM1026EJ-S"},
22410 {"arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
22411 {"fa626te", ARM_ARCH_V5TE
, FPU_NONE
, NULL
},
22412 {"fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22413 {"arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"},
22414 {"arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
},
22415 {"arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, "ARM1136JF-S"},
22416 {"arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
},
22417 {"mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, "MPCore"},
22418 {"mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, "MPCore"},
22419 {"arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
},
22420 {"arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
},
22421 {"arm1176jz-s", ARM_ARCH_V6ZK
, FPU_NONE
, NULL
},
22422 {"arm1176jzf-s", ARM_ARCH_V6ZK
, FPU_ARCH_VFP_V2
, NULL
},
22423 {"cortex-a5", ARM_ARCH_V7A_MP_SEC
,
22424 FPU_NONE
, "Cortex-A5"},
22425 {"cortex-a8", ARM_ARCH_V7A_SEC
,
22426 ARM_FEATURE (0, FPU_VFP_V3
22427 | FPU_NEON_EXT_V1
),
22429 {"cortex-a9", ARM_ARCH_V7A_MP_SEC
,
22430 ARM_FEATURE (0, FPU_VFP_V3
22431 | FPU_NEON_EXT_V1
),
22433 {"cortex-a15", ARM_ARCH_V7A_IDIV_MP_SEC
,
22434 FPU_ARCH_NEON_VFP_V4
,
22436 {"cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, "Cortex-R4"},
22437 {"cortex-r4f", ARM_ARCH_V7R
, FPU_ARCH_VFP_V3D16
,
22439 {"cortex-m4", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M4"},
22440 {"cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, "Cortex-M3"},
22441 {"cortex-m1", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M1"},
22442 {"cortex-m0", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0"},
22443 /* ??? XSCALE is really an architecture. */
22444 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
22445 /* ??? iwmmxt is not a processor. */
22446 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
},
22447 {"iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
},
22448 {"i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
22450 {"ep9312", ARM_FEATURE (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
), FPU_ARCH_MAVERICK
, "ARM920T"},
22451 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
22454 struct arm_arch_option_table
22457 const arm_feature_set value
;
22458 const arm_feature_set default_fpu
;
22461 /* This list should, at a minimum, contain all the architecture names
22462 recognized by GCC. */
22463 static const struct arm_arch_option_table arm_archs
[] =
22465 {"all", ARM_ANY
, FPU_ARCH_FPA
},
22466 {"armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
},
22467 {"armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
},
22468 {"armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
22469 {"armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
22470 {"armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
},
22471 {"armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
},
22472 {"armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
},
22473 {"armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
},
22474 {"armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
},
22475 {"armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
},
22476 {"armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
},
22477 {"armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
},
22478 {"armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
},
22479 {"armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
},
22480 {"armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
},
22481 {"armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
},
22482 {"armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
},
22483 {"armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
},
22484 {"armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
},
22485 {"armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
},
22486 {"armv6zk", ARM_ARCH_V6ZK
, FPU_ARCH_VFP
},
22487 {"armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
},
22488 {"armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
},
22489 {"armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
},
22490 {"armv6zkt2", ARM_ARCH_V6ZKT2
, FPU_ARCH_VFP
},
22491 {"armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
},
22492 {"armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
},
22493 {"armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
},
22494 /* The official spelling of the ARMv7 profile variants is the dashed form.
22495 Accept the non-dashed form for compatibility with old toolchains. */
22496 {"armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
},
22497 {"armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
},
22498 {"armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
},
22499 {"armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
},
22500 {"armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
},
22501 {"armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
},
22502 {"armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
},
22503 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
},
22504 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
},
22505 {"iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
},
22506 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
22509 /* ISA extensions in the co-processor and main instruction set space. */
22510 struct arm_option_extension_value_table
22513 const arm_feature_set value
;
22514 const arm_feature_set allowed_archs
;
22517 /* The following table must be in alphabetical order with a NULL last entry.
22519 static const struct arm_option_extension_value_table arm_extensions
[] =
22521 {"idiv", ARM_FEATURE (ARM_EXT_ADIV
| ARM_EXT_DIV
, 0),
22522 ARM_FEATURE (ARM_EXT_V7A
, 0)},
22523 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT
), ARM_ANY
},
22524 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2
), ARM_ANY
},
22525 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK
), ARM_ANY
},
22526 {"mp", ARM_FEATURE (ARM_EXT_MP
, 0),
22527 ARM_FEATURE (ARM_EXT_V7A
| ARM_EXT_V7R
, 0)},
22528 {"os", ARM_FEATURE (ARM_EXT_OS
, 0),
22529 ARM_FEATURE (ARM_EXT_V6M
, 0)},
22530 {"sec", ARM_FEATURE (ARM_EXT_SEC
, 0),
22531 ARM_FEATURE (ARM_EXT_V6K
| ARM_EXT_V7A
, 0)},
22532 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE
), ARM_ANY
},
22533 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
22536 /* ISA floating-point and Advanced SIMD extensions. */
22537 struct arm_option_fpu_value_table
22540 const arm_feature_set value
;
22543 /* This list should, at a minimum, contain all the fpu names
22544 recognized by GCC. */
22545 static const struct arm_option_fpu_value_table arm_fpus
[] =
22547 {"softfpa", FPU_NONE
},
22548 {"fpe", FPU_ARCH_FPE
},
22549 {"fpe2", FPU_ARCH_FPE
},
22550 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
22551 {"fpa", FPU_ARCH_FPA
},
22552 {"fpa10", FPU_ARCH_FPA
},
22553 {"fpa11", FPU_ARCH_FPA
},
22554 {"arm7500fe", FPU_ARCH_FPA
},
22555 {"softvfp", FPU_ARCH_VFP
},
22556 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
22557 {"vfp", FPU_ARCH_VFP_V2
},
22558 {"vfp9", FPU_ARCH_VFP_V2
},
22559 {"vfp3", FPU_ARCH_VFP_V3
}, /* For backwards compatbility. */
22560 {"vfp10", FPU_ARCH_VFP_V2
},
22561 {"vfp10-r0", FPU_ARCH_VFP_V1
},
22562 {"vfpxd", FPU_ARCH_VFP_V1xD
},
22563 {"vfpv2", FPU_ARCH_VFP_V2
},
22564 {"vfpv3", FPU_ARCH_VFP_V3
},
22565 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
22566 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
22567 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
22568 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
22569 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
22570 {"arm1020t", FPU_ARCH_VFP_V1
},
22571 {"arm1020e", FPU_ARCH_VFP_V2
},
22572 {"arm1136jfs", FPU_ARCH_VFP_V2
},
22573 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
22574 {"maverick", FPU_ARCH_MAVERICK
},
22575 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
22576 {"neon-fp16", FPU_ARCH_NEON_FP16
},
22577 {"vfpv4", FPU_ARCH_VFP_V4
},
22578 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
22579 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
22580 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
22581 {NULL
, ARM_ARCH_NONE
}
/* Simple name -> integer value mapping, used for -mfloat-abi= and
   -meabi= option parsing.  */
struct arm_option_value_table
{
  char *name;	/* Option value name.  */
  long value;	/* Value to record when the name matches.  */
};
22590 static const struct arm_option_value_table arm_float_abis
[] =
22592 {"hard", ARM_FLOAT_ABI_HARD
},
22593 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
22594 {"soft", ARM_FLOAT_ABI_SOFT
},
22599 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
22600 static const struct arm_option_value_table arm_eabis
[] =
22602 {"gnu", EF_ARM_EABI_UNKNOWN
},
22603 {"4", EF_ARM_EABI_VER4
},
22604 {"5", EF_ARM_EABI_VER5
},
/* A long (multi-character) command-line option and its handler.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
22618 arm_parse_extension (char * str
, const arm_feature_set
**opt_p
)
22620 arm_feature_set
*ext_set
= (arm_feature_set
*)
22621 xmalloc (sizeof (arm_feature_set
));
22623 /* We insist on extensions being specified in alphabetical order, and with
22624 extensions being added before being removed. We achieve this by having
22625 the global ARM_EXTENSIONS table in alphabetical order, and using the
22626 ADDING_VALUE variable to indicate whether we are adding an extension (1)
22627 or removing it (0) and only allowing it to change in the order
22629 const struct arm_option_extension_value_table
* opt
= NULL
;
22630 int adding_value
= -1;
22632 /* Copy the feature set, so that we can modify it. */
22633 *ext_set
= **opt_p
;
22636 while (str
!= NULL
&& *str
!= 0)
22643 as_bad (_("invalid architectural extension"));
22648 ext
= strchr (str
, '+');
22651 optlen
= ext
- str
;
22653 optlen
= strlen (str
);
22656 && strncmp (str
, "no", 2) == 0)
22658 if (adding_value
!= 0)
22661 opt
= arm_extensions
;
22667 else if (optlen
> 0)
22669 if (adding_value
== -1)
22672 opt
= arm_extensions
;
22674 else if (adding_value
!= 1)
22676 as_bad (_("must specify extensions to add before specifying "
22677 "those to remove"));
22684 as_bad (_("missing architectural extension"));
22688 gas_assert (adding_value
!= -1);
22689 gas_assert (opt
!= NULL
);
22691 /* Scan over the options table trying to find an exact match. */
22692 for (; opt
->name
!= NULL
; opt
++)
22693 if (strncmp (opt
->name
, str
, optlen
) == 0
22694 && strlen (opt
->name
) == optlen
)
22696 /* Check we can apply the extension to this architecture. */
22697 if (!ARM_CPU_HAS_FEATURE (*ext_set
, opt
->allowed_archs
))
22699 as_bad (_("extension does not apply to the base architecture"));
22703 /* Add or remove the extension. */
22705 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->value
);
22707 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->value
);
22712 if (opt
->name
== NULL
)
22714 /* Did we fail to find an extension because it wasn't specified in
22715 alphabetical order, or because it does not exist? */
22717 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
22718 if (strncmp (opt
->name
, str
, optlen
) == 0)
22721 if (opt
->name
== NULL
)
22722 as_bad (_("unknown architectural extension `%s'"), str
);
22724 as_bad (_("architectural extensions must be specified in "
22725 "alphabetical order"));
22731 /* We should skip the extension we've just matched the next time
22743 arm_parse_cpu (char * str
)
22745 const struct arm_cpu_option_table
* opt
;
22746 char * ext
= strchr (str
, '+');
22750 optlen
= ext
- str
;
22752 optlen
= strlen (str
);
22756 as_bad (_("missing cpu name `%s'"), str
);
22760 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
22761 if (strncmp (opt
->name
, str
, optlen
) == 0)
22763 mcpu_cpu_opt
= &opt
->value
;
22764 mcpu_fpu_opt
= &opt
->default_fpu
;
22765 if (opt
->canonical_name
)
22766 strcpy (selected_cpu_name
, opt
->canonical_name
);
22771 for (i
= 0; i
< optlen
; i
++)
22772 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
22773 selected_cpu_name
[i
] = 0;
22777 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
22782 as_bad (_("unknown cpu `%s'"), str
);
22787 arm_parse_arch (char * str
)
22789 const struct arm_arch_option_table
*opt
;
22790 char *ext
= strchr (str
, '+');
22794 optlen
= ext
- str
;
22796 optlen
= strlen (str
);
22800 as_bad (_("missing architecture name `%s'"), str
);
22804 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
22805 if (strncmp (opt
->name
, str
, optlen
) == 0)
22807 march_cpu_opt
= &opt
->value
;
22808 march_fpu_opt
= &opt
->default_fpu
;
22809 strcpy (selected_cpu_name
, opt
->name
);
22812 return arm_parse_extension (ext
, &march_cpu_opt
);
22817 as_bad (_("unknown architecture `%s'\n"), str
);
22822 arm_parse_fpu (char * str
)
22824 const struct arm_option_fpu_value_table
* opt
;
22826 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
22827 if (streq (opt
->name
, str
))
22829 mfpu_opt
= &opt
->value
;
22833 as_bad (_("unknown floating point format `%s'\n"), str
);
22838 arm_parse_float_abi (char * str
)
22840 const struct arm_option_value_table
* opt
;
22842 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
22843 if (streq (opt
->name
, str
))
22845 mfloat_abi_opt
= opt
->value
;
22849 as_bad (_("unknown floating point abi `%s'\n"), str
);
22855 arm_parse_eabi (char * str
)
22857 const struct arm_option_value_table
*opt
;
22859 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
22860 if (streq (opt
->name
, str
))
22862 meabi_flags
= opt
->value
;
22865 as_bad (_("unknown EABI `%s'\n"), str
);
22871 arm_parse_it_mode (char * str
)
22873 bfd_boolean ret
= TRUE
;
22875 if (streq ("arm", str
))
22876 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
22877 else if (streq ("thumb", str
))
22878 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
22879 else if (streq ("always", str
))
22880 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
22881 else if (streq ("never", str
))
22882 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
22885 as_bad (_("unknown implicit IT mode `%s', should be "\
22886 "arm, thumb, always, or never."), str
);
22893 struct arm_long_option_table arm_long_opts
[] =
22895 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
22896 arm_parse_cpu
, NULL
},
22897 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
22898 arm_parse_arch
, NULL
},
22899 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
22900 arm_parse_fpu
, NULL
},
22901 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
22902 arm_parse_float_abi
, NULL
},
22904 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
22905 arm_parse_eabi
, NULL
},
22907 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
22908 arm_parse_it_mode
, NULL
},
22909 {NULL
, NULL
, 0, NULL
}
22913 md_parse_option (int c
, char * arg
)
22915 struct arm_option_table
*opt
;
22916 const struct arm_legacy_option_table
*fopt
;
22917 struct arm_long_option_table
*lopt
;
22923 target_big_endian
= 1;
22929 target_big_endian
= 0;
22933 case OPTION_FIX_V4BX
:
22938 /* Listing option. Just ignore these, we don't support additional
22943 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
22945 if (c
== opt
->option
[0]
22946 && ((arg
== NULL
&& opt
->option
[1] == 0)
22947 || streq (arg
, opt
->option
+ 1)))
22949 /* If the option is deprecated, tell the user. */
22950 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
22951 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
22952 arg
? arg
: "", _(opt
->deprecated
));
22954 if (opt
->var
!= NULL
)
22955 *opt
->var
= opt
->value
;
22961 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
22963 if (c
== fopt
->option
[0]
22964 && ((arg
== NULL
&& fopt
->option
[1] == 0)
22965 || streq (arg
, fopt
->option
+ 1)))
22967 /* If the option is deprecated, tell the user. */
22968 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
22969 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
22970 arg
? arg
: "", _(fopt
->deprecated
));
22972 if (fopt
->var
!= NULL
)
22973 *fopt
->var
= &fopt
->value
;
22979 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
22981 /* These options are expected to have an argument. */
22982 if (c
== lopt
->option
[0]
22984 && strncmp (arg
, lopt
->option
+ 1,
22985 strlen (lopt
->option
+ 1)) == 0)
22987 /* If the option is deprecated, tell the user. */
22988 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
22989 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
22990 _(lopt
->deprecated
));
22992 /* Call the sup-option parser. */
22993 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
23004 md_show_usage (FILE * fp
)
23006 struct arm_option_table
*opt
;
23007 struct arm_long_option_table
*lopt
;
23009 fprintf (fp
, _(" ARM-specific assembler options:\n"));
23011 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
23012 if (opt
->help
!= NULL
)
23013 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
23015 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
23016 if (lopt
->help
!= NULL
)
23017 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
23021 -EB assemble code for a big-endian cpu\n"));
23026 -EL assemble code for a little-endian cpu\n"));
23030 --fix-v4bx Allow BX in ARMv4 code\n"));
23038 arm_feature_set flags
;
23039 } cpu_arch_ver_table
;
23041 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
23042 least features first. */
23043 static const cpu_arch_ver_table cpu_arch_ver
[] =
23049 {4, ARM_ARCH_V5TE
},
23050 {5, ARM_ARCH_V5TEJ
},
23054 {11, ARM_ARCH_V6M
},
23055 {12, ARM_ARCH_V6SM
},
23056 {8, ARM_ARCH_V6T2
},
23057 {10, ARM_ARCH_V7A
},
23058 {10, ARM_ARCH_V7R
},
23059 {10, ARM_ARCH_V7M
},
23063 /* Set an attribute if it has not already been set by the user. */
23065 aeabi_set_attribute_int (int tag
, int value
)
23068 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
23069 || !attributes_set_explicitly
[tag
])
23070 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
23074 aeabi_set_attribute_string (int tag
, const char *value
)
23077 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
23078 || !attributes_set_explicitly
[tag
])
23079 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
23082 /* Set the public EABI object attributes. */
23084 aeabi_set_public_attributes (void)
23087 arm_feature_set flags
;
23088 arm_feature_set tmp
;
23089 const cpu_arch_ver_table
*p
;
23091 /* Choose the architecture based on the capabilities of the requested cpu
23092 (if any) and/or the instructions actually used. */
23093 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
23094 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
23095 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
23096 /*Allow the user to override the reported architecture. */
23099 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
23100 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
23105 for (p
= cpu_arch_ver
; p
->val
; p
++)
23107 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
23110 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
23114 /* The table lookup above finds the last architecture to contribute
23115 a new feature. Unfortunately, Tag13 is a subset of the union of
23116 v6T2 and v7-M, so it is never seen as contributing a new feature.
23117 We can not search for the last entry which is entirely used,
23118 because if no CPU is specified we build up only those flags
23119 actually used. Perhaps we should separate out the specified
23120 and implicit cases. Avoid taking this path for -march=all by
23121 checking for contradictory v7-A / v7-M features. */
23123 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
23124 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
23125 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
23128 /* Tag_CPU_name. */
23129 if (selected_cpu_name
[0])
23133 q
= selected_cpu_name
;
23134 if (strncmp (q
, "armv", 4) == 0)
23139 for (i
= 0; q
[i
]; i
++)
23140 q
[i
] = TOUPPER (q
[i
]);
23142 aeabi_set_attribute_string (Tag_CPU_name
, q
);
23145 /* Tag_CPU_arch. */
23146 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
23148 /* Tag_CPU_arch_profile. */
23149 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
))
23150 aeabi_set_attribute_int (Tag_CPU_arch_profile
, 'A');
23151 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
23152 aeabi_set_attribute_int (Tag_CPU_arch_profile
, 'R');
23153 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
23154 aeabi_set_attribute_int (Tag_CPU_arch_profile
, 'M');
23156 /* Tag_ARM_ISA_use. */
23157 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
23159 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
23161 /* Tag_THUMB_ISA_use. */
23162 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
23164 aeabi_set_attribute_int (Tag_THUMB_ISA_use
,
23165 ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
) ? 2 : 1);
23167 /* Tag_VFP_arch. */
23168 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
23169 aeabi_set_attribute_int (Tag_VFP_arch
,
23170 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
23172 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
23173 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
23174 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
23175 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
23176 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
23177 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
23178 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
23179 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
23180 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
23182 /* Tag_ABI_HardFP_use. */
23183 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
23184 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
23185 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
23187 /* Tag_WMMX_arch. */
23188 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
23189 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
23190 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
23191 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
23193 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
23194 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
23195 aeabi_set_attribute_int
23196 (Tag_Advanced_SIMD_arch
, (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
)
23199 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
23200 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
))
23201 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
23204 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
))
23205 aeabi_set_attribute_int (Tag_DIV_use
, 2);
23206 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
))
23207 aeabi_set_attribute_int (Tag_DIV_use
, 0);
23209 aeabi_set_attribute_int (Tag_DIV_use
, 1);
23211 /* Tag_MP_extension_use. */
23212 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
23213 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
23215 /* Tag Virtualization_use. */
23216 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
23217 aeabi_set_attribute_int (Tag_Virtualization_use
, 1);
23220 /* Add the default contents for the .ARM.attributes section. */
23224 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
23227 aeabi_set_public_attributes ();
23229 #endif /* OBJ_ELF */
23232 /* Parse a .cpu directive. */
23235 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
23237 const struct arm_cpu_option_table
*opt
;
23241 name
= input_line_pointer
;
23242 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
23243 input_line_pointer
++;
23244 saved_char
= *input_line_pointer
;
23245 *input_line_pointer
= 0;
23247 /* Skip the first "all" entry. */
23248 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
23249 if (streq (opt
->name
, name
))
23251 mcpu_cpu_opt
= &opt
->value
;
23252 selected_cpu
= opt
->value
;
23253 if (opt
->canonical_name
)
23254 strcpy (selected_cpu_name
, opt
->canonical_name
);
23258 for (i
= 0; opt
->name
[i
]; i
++)
23259 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
23260 selected_cpu_name
[i
] = 0;
23262 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
23263 *input_line_pointer
= saved_char
;
23264 demand_empty_rest_of_line ();
23267 as_bad (_("unknown cpu `%s'"), name
);
23268 *input_line_pointer
= saved_char
;
23269 ignore_rest_of_line ();
23273 /* Parse a .arch directive. */
23276 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
23278 const struct arm_arch_option_table
*opt
;
23282 name
= input_line_pointer
;
23283 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
23284 input_line_pointer
++;
23285 saved_char
= *input_line_pointer
;
23286 *input_line_pointer
= 0;
23288 /* Skip the first "all" entry. */
23289 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
23290 if (streq (opt
->name
, name
))
23292 mcpu_cpu_opt
= &opt
->value
;
23293 selected_cpu
= opt
->value
;
23294 strcpy (selected_cpu_name
, opt
->name
);
23295 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
23296 *input_line_pointer
= saved_char
;
23297 demand_empty_rest_of_line ();
23301 as_bad (_("unknown architecture `%s'\n"), name
);
23302 *input_line_pointer
= saved_char
;
23303 ignore_rest_of_line ();
23307 /* Parse a .object_arch directive. */
23310 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
23312 const struct arm_arch_option_table
*opt
;
23316 name
= input_line_pointer
;
23317 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
23318 input_line_pointer
++;
23319 saved_char
= *input_line_pointer
;
23320 *input_line_pointer
= 0;
23322 /* Skip the first "all" entry. */
23323 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
23324 if (streq (opt
->name
, name
))
23326 object_arch
= &opt
->value
;
23327 *input_line_pointer
= saved_char
;
23328 demand_empty_rest_of_line ();
23332 as_bad (_("unknown architecture `%s'\n"), name
);
23333 *input_line_pointer
= saved_char
;
23334 ignore_rest_of_line ();
23337 /* Parse a .arch_extension directive. */
23340 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
23342 const struct arm_option_extension_value_table
*opt
;
23345 int adding_value
= 1;
23347 name
= input_line_pointer
;
23348 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
23349 input_line_pointer
++;
23350 saved_char
= *input_line_pointer
;
23351 *input_line_pointer
= 0;
23353 if (strlen (name
) >= 2
23354 && strncmp (name
, "no", 2) == 0)
23360 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
23361 if (streq (opt
->name
, name
))
23363 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt
, opt
->allowed_archs
))
23365 as_bad (_("architectural extension `%s' is not allowed for the "
23366 "current base architecture"), name
);
23371 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_cpu
, opt
->value
);
23373 ARM_CLEAR_FEATURE (selected_cpu
, selected_cpu
, opt
->value
);
23375 mcpu_cpu_opt
= &selected_cpu
;
23376 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
23377 *input_line_pointer
= saved_char
;
23378 demand_empty_rest_of_line ();
23382 if (opt
->name
== NULL
)
23383 as_bad (_("unknown architecture `%s'\n"), name
);
23385 *input_line_pointer
= saved_char
;
23386 ignore_rest_of_line ();
23389 /* Parse a .fpu directive. */
23392 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
23394 const struct arm_option_fpu_value_table
*opt
;
23398 name
= input_line_pointer
;
23399 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
23400 input_line_pointer
++;
23401 saved_char
= *input_line_pointer
;
23402 *input_line_pointer
= 0;
23404 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
23405 if (streq (opt
->name
, name
))
23407 mfpu_opt
= &opt
->value
;
23408 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
23409 *input_line_pointer
= saved_char
;
23410 demand_empty_rest_of_line ();
23414 as_bad (_("unknown floating point format `%s'\n"), name
);
23415 *input_line_pointer
= saved_char
;
23416 ignore_rest_of_line ();
23419 /* Copy symbol information. */
23422 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
23424 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
23428 /* Given a symbolic attribute NAME, return the proper integer value.
23429 Returns -1 if the attribute is not known. */
23432 arm_convert_symbolic_attribute (const char *name
)
23434 static const struct
23439 attribute_table
[] =
23441 /* When you modify this table you should
23442 also modify the list in doc/c-arm.texi. */
23443 #define T(tag) {#tag, tag}
23444 T (Tag_CPU_raw_name
),
23447 T (Tag_CPU_arch_profile
),
23448 T (Tag_ARM_ISA_use
),
23449 T (Tag_THUMB_ISA_use
),
23453 T (Tag_Advanced_SIMD_arch
),
23454 T (Tag_PCS_config
),
23455 T (Tag_ABI_PCS_R9_use
),
23456 T (Tag_ABI_PCS_RW_data
),
23457 T (Tag_ABI_PCS_RO_data
),
23458 T (Tag_ABI_PCS_GOT_use
),
23459 T (Tag_ABI_PCS_wchar_t
),
23460 T (Tag_ABI_FP_rounding
),
23461 T (Tag_ABI_FP_denormal
),
23462 T (Tag_ABI_FP_exceptions
),
23463 T (Tag_ABI_FP_user_exceptions
),
23464 T (Tag_ABI_FP_number_model
),
23465 T (Tag_ABI_align_needed
),
23466 T (Tag_ABI_align8_needed
),
23467 T (Tag_ABI_align_preserved
),
23468 T (Tag_ABI_align8_preserved
),
23469 T (Tag_ABI_enum_size
),
23470 T (Tag_ABI_HardFP_use
),
23471 T (Tag_ABI_VFP_args
),
23472 T (Tag_ABI_WMMX_args
),
23473 T (Tag_ABI_optimization_goals
),
23474 T (Tag_ABI_FP_optimization_goals
),
23475 T (Tag_compatibility
),
23476 T (Tag_CPU_unaligned_access
),
23477 T (Tag_FP_HP_extension
),
23478 T (Tag_VFP_HP_extension
),
23479 T (Tag_ABI_FP_16bit_format
),
23480 T (Tag_MPextension_use
),
23482 T (Tag_nodefaults
),
23483 T (Tag_also_compatible_with
),
23484 T (Tag_conformance
),
23486 T (Tag_Virtualization_use
),
23487 /* We deliberately do not include Tag_MPextension_use_legacy. */
23495 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
23496 if (streq (name
, attribute_table
[i
].name
))
23497 return attribute_table
[i
].tag
;
23503 /* Apply sym value for relocations only in the case that
23504 they are for local symbols and you have the respective
23505 architectural feature for blx and simple switches. */
23507 arm_apply_sym_value (struct fix
* fixP
)
23510 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23511 && !S_IS_EXTERNAL (fixP
->fx_addsy
))
23513 switch (fixP
->fx_r_type
)
23515 case BFD_RELOC_ARM_PCREL_BLX
:
23516 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23517 if (ARM_IS_FUNC (fixP
->fx_addsy
))
23521 case BFD_RELOC_ARM_PCREL_CALL
:
23522 case BFD_RELOC_THUMB_PCREL_BLX
:
23523 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
23534 #endif /* OBJ_ELF */